0001: /*
0002: * Helma License Notice
0003: *
0004: * The contents of this file are subject to the Helma License
0005: * Version 2.0 (the "License"). You may not use this file except in
0006: * compliance with the License. A copy of the License is available at
0007: * http://adele.helma.org/download/helma/license.txt
0008: *
0009: * Copyright 1998-2003 Helma Software. All Rights Reserved.
0010: *
0011: * $RCSfile$
0012: * $Author: hannes $
0013: * $Revision: 8690 $
0014: * $Date: 2007-12-10 11:47:41 +0100 (Mon, 10 Dez 2007) $
0015: */
0016:
0017: package helma.objectmodel.db;
0018:
0019: import helma.framework.core.Application;
0020: import helma.framework.core.RequestEvaluator;
0021: import helma.objectmodel.*;
0022: import helma.objectmodel.dom.XmlDatabase;
0023:
0024: import java.io.*;
0025: import java.math.BigDecimal;
0026: import java.sql.*;
0027: import java.util.*;
0028:
0029: import org.apache.commons.logging.Log;
0030: import org.apache.commons.logging.LogFactory;
0031:
0032: /**
0033: * The NodeManager is responsible for fetching Nodes from the internal or
0034: * external data sources, caching them in a least-recently-used Hashtable,
0035: * and writing changes back to the databases.
0036: */
0037: public final class NodeManager {
0038:
0039: protected Application app;
0040: private ObjectCache cache;
0041: protected IDatabase db;
0042: protected IDGenerator idgen;
0043: private boolean logSql;
0044: private Log sqlLog = null;
0045: protected boolean logReplication;
0046: private ArrayList listeners = new ArrayList();
0047:
0048: // a wrapper that catches some Exceptions while accessing this NM
0049: public final WrappedNodeManager safe;
0050:
0051: /**
0052: * Create a new NodeManager for Application app.
0053: */
0054: public NodeManager(Application app) {
0055: this .app = app;
0056: safe = new WrappedNodeManager(this );
0057: }
0058:
    /**
     * Initialize the NodeManager for the given dbHome and
     * application properties. An embedded database will be
     * created in dbHome if one doesn't already exist.
     *
     * @param dbHome directory of the embedded database
     * @param props application properties; reads cacheimpl, idGeneratorImpl,
     *              logsql, logReplication and replicationUrl
     * @throws DatabaseException if the embedded database can't be initialized
     * @throws ClassNotFoundException if a configured cache or id-generator class is missing
     * @throws IllegalAccessException if such a class is not accessible
     * @throws InstantiationException if such a class can't be instantiated
     */
    public void init(File dbHome, Properties props)
            throws DatabaseException, ClassNotFoundException,
            IllegalAccessException, InstantiationException {
        // cache implementation is configurable, defaulting to helma.util.CacheMap
        String cacheImpl = props.getProperty("cacheimpl",
                "helma.util.CacheMap");

        cache = (ObjectCache) Class.forName(cacheImpl).newInstance();
        cache.init(app);

        // optional custom id generator; when unset, ids come from doGenerateID()
        String idgenImpl = props.getProperty("idGeneratorImpl");

        if (idgenImpl != null) {
            idgen = (IDGenerator) Class.forName(idgenImpl)
                    .newInstance();
            idgen.init(app);
        }

        logSql = "true".equalsIgnoreCase(props.getProperty("logsql"));
        logReplication = "true".equalsIgnoreCase(props
                .getProperty("logReplication"));

        String replicationUrl = props.getProperty("replicationUrl");

        if (replicationUrl != null) {
            if (logReplication) {
                app.logEvent("Setting up replication listener at "
                        + replicationUrl);
            }

            // the replicator is notified of node changes and forwards them
            // to the configured remote url
            Replicator replicator = new Replicator(this);
            replicator.addUrl(replicationUrl);
            addNodeChangeListener(replicator);
        }

        // the embedded XML database is always set up, regardless of any
        // relational mappings
        db = new XmlDatabase();
        db.init(dbHome, app);
    }
0101:
0102: /**
0103: * Gets the application's root node.
0104: */
0105: public Node getRootNode() throws Exception {
0106: DbMapping rootMapping = app.getRootMapping();
0107: DbKey key = new DbKey(rootMapping, app.getRootId());
0108: Node node = getNode(key);
0109: if (node != null && rootMapping != null) {
0110: node.setDbMapping(rootMapping);
0111: node.setPrototype(rootMapping.getTypeName());
0112: }
0113: return node;
0114: }
0115:
0116: /**
0117: * Checks if the given node is the application's root node.
0118: */
0119: public boolean isRootNode(Node node) {
0120: return node.getState() != Node.TRANSIENT
0121: && app.getRootId().equals(node.getID())
0122: && DbMapping.areStorageCompatible(app.getRootMapping(),
0123: node.getDbMapping());
0124: }
0125:
0126: /**
0127: * app.properties file has been updated. Reread some settings.
0128: */
0129: public void updateProperties(Properties props) {
0130: // notify the cache about the properties update
0131: cache.updateProperties(props);
0132: logSql = "true".equalsIgnoreCase(props.getProperty("logsql"));
0133: logReplication = "true".equalsIgnoreCase(props
0134: .getProperty("logReplication"));
0135: }
0136:
    /**
     * Shut down this node manager. This is called when the application
     * using this node manager is stopped.
     *
     * @throws DatabaseException if the embedded database fails to shut down
     */
    public void shutdown() throws DatabaseException {
        // NOTE(review): if db.shutdown() throws, the cache and id generator
        // are not shut down -- confirm this ordering is intended
        db.shutdown();

        if (cache != null) {
            cache.shutdown();
            cache = null;
        }

        if (idgen != null) {
            idgen.shutdown();
        }
    }
0153:
0154: /**
0155: * Delete a node from the database.
0156: */
0157: public void deleteNode(Node node) throws Exception {
0158: if (node != null) {
0159: synchronized (this ) {
0160: Transactor tx = (Transactor) Thread.currentThread();
0161:
0162: node.setState(Node.INVALID);
0163: deleteNode(db, tx.txn, node);
0164: }
0165: }
0166: }
0167:
    /**
     * Get a node by key. This is called from a node that already holds
     * a reference to another node via a NodeHandle/Key.
     *
     * @param key the key identifying the node
     * @return the node, or null if it could not be resolved
     * @throws Exception if fetching the node from the database fails
     */
    public Node getNode(Key key) throws Exception {
        // the calling thread is expected to be a Transactor
        Transactor tx = (Transactor) Thread.currentThread();

        // See if Transactor has already come across this node
        Node node = tx.getCleanNode(key);

        if ((node != null) && (node.getState() != Node.INVALID)) {
            return node;
        }

        // try to get the node from the shared cache
        node = (Node) cache.get(key);

        if ((node == null) || (node.getState() == Node.INVALID)) {
            // The requested node isn't in the shared cache.
            if (key instanceof SyntheticKey) {
                // synthetic keys are resolved via the parent node and its
                // property relation for the key's id
                Node parent = getNode(key.getParentKey());
                Relation rel = parent.dbmap.getPropertyRelation(key
                        .getID());

                if (rel != null) {
                    return getNode(parent, key.getID(), rel);
                } else {
                    return null;
                }
            } else if (key instanceof DbKey) {
                // fetch directly from the database by primary key
                node = getNodeByKey(tx.txn, (DbKey) key);
            }

            if (node != null) {
                // register in shared cache, or adopt an instance another
                // thread registered in the meantime
                node = registerNewNode(node, null);
            }
        }

        if (node != null) {
            // remember the node in the transactor's thread-local cache
            tx.visitCleanNode(key, node);
        }

        return node;
    }
0212:
    /**
     * Get a node by relation, using the home node, the relation and a key to apply.
     * In contrast to getNode (Key key), this is usually called when we don't yet know
     * whether such a node exists.
     *
     * @param home the node the relation starts from
     * @param kstr the key string to apply to the relation
     * @param rel the relation leading from home to the requested node
     * @return the resolved node, or null if no such node exists
     * @throws Exception if fetching the node from the database fails
     */
    public Node getNode(Node home, String kstr, Relation rel)
            throws Exception {
        if (kstr == null) {
            return null;
        }

        Transactor tx = (Transactor) Thread.currentThread();

        Key key;
        DbMapping otherDbm = rel == null ? null : rel.otherType;
        // check what kind of object we're looking for and make an appropriate key
        // NOTE(review): rel is dereferenced unconditionally below, so despite
        // the null-guard above a null rel would throw a NullPointerException
        if (rel.isComplexReference()) {
            // a key for a complex reference
            key = new MultiKey(rel.otherType, rel.getKeyParts(home));
            otherDbm = app.getDbMapping(key.getStorageName());
        } else if (rel.createOnDemand()) {
            // a key for a virtually defined object that's never actually stored in the db
            // or a key for an object that represents subobjects grouped by some property,
            // generated on the fly
            key = new SyntheticKey(home.getKey(), kstr);
        } else {
            // Not a relation we can use getNodeByRelation() for.
            return null;
        }

        // See if Transactor has already come across this node
        Node node = tx.getCleanNode(key);

        if ((node != null) && (node.getState() != Node.INVALID)) {
            // we used to refresh the node in the main cache here to avoid the primary key
            // entry being flushed from cache before the secondary one
            // (risking duplicate nodes in cache) but we don't need to since we fetched
            // the node from the threadlocal transactor cache and didn't refresh it in the
            // main cache.
            return node;
        }

        // try to get the node from the shared cache
        node = (Node) cache.get(key);

        // check if we can use the cached node without further checks.
        // we need further checks for subnodes fetched by name if the subnodes were changed.
        if ((node != null) && (node.getState() != Node.INVALID)) {
            // check if node is null node (cached null)
            if (node.isNullNode()) {
                // a cached null is only valid while the subnodes are unchanged
                if (node.created != home.getLastSubnodeChange(rel)) {
                    node = null; // cached null not valid anymore
                }
            } else if (!rel.virtual) {
                // apply different consistency checks for groupby nodes and database nodes:
                // for group nodes, check if they're contained
                if (rel.groupby != null) {
                    if (home.contains(node) < 0) {
                        node = null;
                    }

                    // for database nodes, check if constraints are fulfilled
                } else if (!rel.usesPrimaryKey()) {
                    if (!rel.checkConstraints(home, node)) {
                        node = null;
                    }
                }
            }
        }

        if ((node == null) || (node.getState() == Node.INVALID)) {
            // The requested node isn't in the shared cache.
            // Synchronize with key to make sure only one version is fetched
            // from the database.
            node = getNodeByRelation(tx.txn, home, kstr, rel, otherDbm);

            if (node != null && node.getState() != Node.DELETED) {
                Node newNode = node;
                // register under the secondary key too when it differs from
                // the node's own primary key
                if (key.equals(node.getKey())) {
                    node = registerNewNode(node, null);
                } else {
                    node = registerNewNode(node, key);
                }
                // reset create time of old node, otherwise Relation.checkConstraints
                // will reject it under certain circumstances.
                if (node != newNode) {
                    node.created = node.lastmodified;
                }
            } else {
                // node fetched from db is null, cache result using nullNode
                synchronized (cache) {
                    cache.put(key, new Node(home
                            .getLastSubnodeChange(rel)));

                    // we ignore the case that another thread has created the node in the meantime
                    return null;
                }
            }
        } else if (node.isNullNode()) {
            // the nullNode caches a null value, i.e. an object that doesn't exist
            return null;
        } else {
            // update primary key in cache to keep it from being flushed, see above
            if (!rel.usesPrimaryKey()
                    && node.getState() != Node.TRANSIENT) {
                synchronized (cache) {
                    Node old = (Node) cache.put(node.getKey(), node);

                    // another valid node was cached under the primary key:
                    // keep it and register it under both keys
                    if (old != node && old != null && !old.isNullNode()
                            && old.getState() != Node.INVALID) {
                        cache.put(node.getKey(), old);
                        cache.put(key, old);
                        node = old;
                    }
                }
            }
        }

        if (node != null) {
            // remember the node in the transactor's thread-local cache
            tx.visitCleanNode(key, node);
        }

        return node;
    }
0337:
    /**
     * Register a newly created node in the node cache unless it is already contained.
     * If so, the previously registered node is kept and returned. Otherwise, the onInit()
     * function is called on the new node and it is returned.
     *
     * @param node the node to register
     * @param secondaryKey an additional key to register the node under, or null
     * @return the newly registered node, or the one that was already registered with the node's key
     */
    private Node registerNewNode(Node node, Key secondaryKey) {
        Key key = node.getKey();

        synchronized (cache) {
            Node old = (Node) cache.put(key, node);

            // if a valid node was already cached under this key, restore it
            // in the cache and return it instead of the new node
            if (old != null && !old.isNullNode()
                    && old.getState() != INode.INVALID) {
                cache.put(key, old);
                if (secondaryKey != null) {
                    cache.put(secondaryKey, old);
                }
                return old;
            } else if (secondaryKey != null) {
                cache.put(secondaryKey, node);
            }
        }
        // New node is going to be used, invoke onInit() on it
        // Invoke onInit() if it is defined by this Node's prototype
        try {
            // We need to reach deep into helma.framework.core to invoke onInit(),
            // but the functionality is really worth it.
            RequestEvaluator reval = app.getCurrentRequestEvaluator();
            if (reval != null) {
                reval.invokeDirectFunction(node, "onInit",
                        RequestEvaluator.EMPTY_ARGS);
            }
        } catch (Exception x) {
            app.logError("Error invoking onInit()", x);
        }
        return node;
    }
0377:
    /**
     * Register a node in the node cache under its primary key.
     *
     * @param node the node to register
     */
    public void registerNode(Node node) {
        cache.put(node.getKey(), node);
    }
0384:
    /**
     * Register a node in the node cache using the key argument.
     *
     * @param node the node to register
     * @param key the key to register the node under
     */
    protected void registerNode(Node node, Key key) {
        cache.put(key, node);
    }
0391:
    /**
     * Remove a node from the node cache. If at a later time it is accessed again,
     * it will be refetched from the database.
     *
     * @param node the node to evict
     */
    public void evictNode(Node node) {
        // mark invalid so existing references stop using the stale instance
        node.setState(INode.INVALID);
        cache.remove(node.getKey());
    }
0400:
    /**
     * Remove a node from the node cache by key. If at a later time it is
     * accessed again, it will be refetched from the database.
     *
     * @param key the key of the node to evict
     */
    public void evictNodeByKey(Key key) {
        Node n = (Node) cache.remove(key);

        if (n != null) {
            n.setState(INode.INVALID);

            // when evicted via a non-primary key, also drop the entry
            // registered under the node's own primary key
            if (!(key instanceof DbKey)) {
                cache.remove(n.getKey());
            }
        }
    }
0416:
0417: /**
0418: * Used when a key stops being valid for a node. The cached node itself
0419: * remains valid, if it is present in the cache by other keys.
0420: */
0421: public void evictKey(Key key) {
0422: cache.remove(key);
0423: // also drop key from thread-local transactor cache
0424: if (Thread.currentThread() instanceof Transactor) {
0425: ((Transactor) Thread.currentThread()).dropCleanNode(key);
0426: }
0427: }
0428:
0429: ////////////////////////////////////////////////////////////////////////
0430: // methods to do the actual db work
0431: ////////////////////////////////////////////////////////////////////////
0432:
0433: /**
0434: * Insert a new node in the embedded database or a relational database table,
0435: * depending on its db mapping.
0436: */
0437: public void insertNode(IDatabase db, ITransaction txn, Node node)
0438: throws IOException, SQLException, ClassNotFoundException {
0439: invokeOnPersist(node);
0440: DbMapping dbm = node.getDbMapping();
0441:
0442: if ((dbm == null) || !dbm.isRelational()) {
0443: db.insertNode(txn, node.getID(), node);
0444: } else {
0445: insertRelationalNode(node, dbm, dbm.getConnection());
0446: }
0447: }
0448:
0449: /**
0450: * Insert a node into a different (relational) database than its default one.
0451: */
0452: public void exportNode(Node node, DbSource dbs)
0453: throws SQLException, ClassNotFoundException {
0454: if (node == null) {
0455: throw new IllegalArgumentException(
0456: "Node can't be null in exportNode");
0457: }
0458:
0459: DbMapping dbm = node.getDbMapping();
0460:
0461: if (dbs == null) {
0462: throw new IllegalArgumentException(
0463: "DbSource can't be null in exportNode");
0464: } else if ((dbm == null) || !dbm.isRelational()) {
0465: throw new IllegalArgumentException(
0466: "Can't export into non-relational database");
0467: } else {
0468: insertRelationalNode(node, dbm, dbs.getConnection());
0469: }
0470: }
0471:
0472: /**
0473: * Insert a node into a different (relational) database than its default one.
0474: */
0475: public void exportNode(Node node, DbMapping dbm)
0476: throws SQLException, ClassNotFoundException {
0477: if (node == null) {
0478: throw new IllegalArgumentException(
0479: "Node can't be null in exportNode");
0480: }
0481:
0482: if (dbm == null) {
0483: throw new IllegalArgumentException(
0484: "DbMapping can't be null in exportNode");
0485: } else if (!dbm.isRelational()) {
0486: throw new IllegalArgumentException(
0487: "Can't export into non-relational database");
0488: } else {
0489: insertRelationalNode(node, dbm, dbm.getConnection());
0490: }
0491: }
0492:
    /**
     * Insert a node into a relational database.
     *
     * @param node the node to insert
     * @param dbm the database mapping describing the target table
     * @param con the JDBC connection to insert through
     * @throws ClassNotFoundException if the JDBC driver can't be loaded
     * @throws SQLException if the insert fails
     */
    protected void insertRelationalNode(Node node, DbMapping dbm,
            Connection con) throws ClassNotFoundException, SQLException {

        if (con == null) {
            throw new NullPointerException(
                    "Error inserting relational node: Connection is null");
        }

        // set connection to write mode
        if (con.isReadOnly())
            con.setReadOnly(false);

        String insertString = dbm.getInsert();
        PreparedStatement stmt = con.prepareStatement(insertString);

        // app.logEvent ("inserting relational node: " + node.getID ());
        DbColumn[] columns = dbm.getColumns();

        long logTimeStart = logSql ? System.currentTimeMillis() : 0;

        try {
            // parameter index into the prepared statement; only advanced for
            // mapped columns, matching the placeholders in dbm.getInsert()
            int columnNumber = 1;

            for (int i = 0; i < columns.length; i++) {
                DbColumn col = columns[i];
                if (!col.isMapped())
                    continue;
                if (col.isIdField()) {
                    setStatementValue(stmt, columnNumber, node.getID(),
                            col);
                } else if (col.isPrototypeField()) {
                    // the prototype column stores the mapping's extension id
                    setStatementValue(stmt, columnNumber, dbm
                            .getExtensionId(), col);
                } else {
                    Relation rel = col.getRelation();
                    Property p = rel == null ? null : node
                            .getProperty(rel.getPropName());

                    if (p != null) {
                        setStatementValue(stmt, columnNumber, p, col
                                .getType());
                    } else if (col.isNameField()) {
                        stmt.setString(columnNumber, node.getName());
                    } else {
                        // no value for this column - bind an explicit SQL NULL
                        stmt.setNull(columnNumber, col.getType());
                    }
                }
                columnNumber += 1;
            }
            stmt.executeUpdate();

        } finally {
            if (logSql) {
                long logTimeStop = java.lang.System.currentTimeMillis();
                logSqlStatement("SQL INSERT", dbm.getTableName(),
                        logTimeStart, logTimeStop, insertString);
            }
            if (stmt != null) {
                try {
                    stmt.close();
                } catch (Exception ignore) {
                }
            }
        }
    }
0561:
0562: /**
0563: * calls onPersist function for the HopObject
0564: */
0565: private void invokeOnPersist(Node node) {
0566: try {
0567: // We need to reach deap into helma.framework.core to invoke onPersist(),
0568: // but the functionality is really worth it.
0569: RequestEvaluator reval = app.getCurrentRequestEvaluator();
0570: if (reval != null) {
0571: reval.invokeDirectFunction(node, "onPersist",
0572: RequestEvaluator.EMPTY_ARGS);
0573: }
0574: } catch (Exception x) {
0575: app.logError("Error invoking onPersist()", x);
0576: }
0577: }
0578:
    /**
     * Updates a modified node in the embedded db or an external relational database, depending
     * on its database mapping.
     *
     * @param db the embedded database used for non-relational nodes
     * @param txn the current transaction
     * @param node the node to update
     * @return true if the DbMapping of the updated Node is to be marked as updated via
     *         DbMapping.setLastDataChange
     * @throws IOException if writing to the embedded database fails
     * @throws SQLException if the SQL update fails
     * @throws ClassNotFoundException if the JDBC driver can't be loaded
     */
    public boolean updateNode(IDatabase db, ITransaction txn, Node node)
            throws IOException, SQLException, ClassNotFoundException {

        invokeOnPersist(node);
        DbMapping dbm = node.getDbMapping();
        boolean markMappingAsUpdated = false;

        if ((dbm == null) || !dbm.isRelational()) {
            // non-relational nodes are written to the embedded db as a whole
            db.updateNode(txn, node.getID(), node);
        } else {
            Hashtable propMap = node.getPropMap();
            Property[] props;

            if (propMap == null) {
                props = new Property[0];
            } else {
                props = new Property[propMap.size()];
                propMap.values().toArray(props);
            }

            // make sure table meta info is loaded by dbmapping
            dbm.getColumns();

            StringBuffer b = dbm.getUpdate();

            // comma flag set after the first dirty column, also tells us
            // if there are dirty columns at all
            boolean comma = false;

            // first pass: append "col = ?" for every dirty, writable,
            // primitive-or-reference property; null out all others
            for (int i = 0; i < props.length; i++) {
                // skip clean properties
                if ((props[i] == null) || !props[i].dirty) {
                    // null out clean property so we don't consider it later
                    props[i] = null;
                    continue;
                }

                Relation rel = dbm.propertyToRelation(props[i]
                        .getName());

                // skip readonly, virtual and collection relations
                if ((rel == null) || rel.readonly || rel.virtual
                        || (!rel.isPrimitiveOrReference())) {
                    // null out property so we don't consider it later
                    props[i] = null;
                    continue;
                }

                if (comma) {
                    b.append(", ");
                } else {
                    comma = true;
                }

                b.append(rel.getDbField());
                b.append(" = ?");
            }

            // if no columns were updated, return false
            if (!comma) {
                return false;
            }

            b.append(" WHERE ");
            dbm.appendCondition(b, dbm.getIDField(), node.getID());

            Connection con = dbm.getConnection();
            // set connection to write mode
            if (con.isReadOnly())
                con.setReadOnly(false);
            PreparedStatement stmt = con.prepareStatement(b.toString());

            int stmtNumber = 0;
            long logTimeStart = logSql ? System.currentTimeMillis() : 0;

            try {
                // second pass: bind the surviving properties in the same
                // order their columns were appended above
                for (int i = 0; i < props.length; i++) {
                    Property p = props[i];

                    if (p == null) {
                        continue;
                    }

                    Relation rel = dbm.propertyToRelation(p.getName());

                    stmtNumber++;
                    setStatementValue(stmt, stmtNumber, p, rel
                            .getColumnType());

                    // mark the property as clean again
                    p.dirty = false;

                    if (!rel.isPrivate()) {
                        markMappingAsUpdated = true;
                    }
                }

                stmt.executeUpdate();

            } finally {
                if (logSql) {
                    long logTimeStop = System.currentTimeMillis();
                    logSqlStatement("SQL UPDATE", dbm.getTableName(),
                            logTimeStart, logTimeStop, b.toString());
                }
                if (stmt != null) {
                    try {
                        stmt.close();
                    } catch (Exception ignore) {
                    }
                }
            }
        }

        // update may cause changes in the node's parent subnode array
        // TODO: is this really needed anymore?
        if (markMappingAsUpdated && node.isAnonymous()) {
            Node parent = node.getCachedParent();

            if (parent != null) {
                parent.markSubnodesChanged();
            }
        }

        return markMappingAsUpdated;
    }
0711:
0712: /**
0713: * Performs the actual deletion of a node from either the embedded or an external
0714: * SQL database.
0715: */
0716: public void deleteNode(IDatabase db, ITransaction txn, Node node)
0717: throws Exception {
0718: DbMapping dbm = node.getDbMapping();
0719:
0720: if ((dbm == null) || !dbm.isRelational()) {
0721: db.deleteNode(txn, node.getID());
0722: } else {
0723: Statement st = null;
0724: long logTimeStart = logSql ? System.currentTimeMillis() : 0;
0725: String str = new StringBuffer("DELETE FROM ").append(
0726: dbm.getTableName()).append(" WHERE ").append(
0727: dbm.getIDField()).append(" = ")
0728: .append(node.getID()).toString();
0729:
0730: try {
0731: Connection con = dbm.getConnection();
0732: // set connection to write mode
0733: if (con.isReadOnly())
0734: con.setReadOnly(false);
0735:
0736: st = con.createStatement();
0737:
0738: st.executeUpdate(str);
0739:
0740: } finally {
0741: if (logSql) {
0742: long logTimeStop = System.currentTimeMillis();
0743: logSqlStatement("SQL DELETE", dbm.getTableName(),
0744: logTimeStart, logTimeStop, str);
0745: }
0746: if (st != null) {
0747: try {
0748: st.close();
0749: } catch (Exception ignore) {
0750: }
0751: }
0752: }
0753: }
0754:
0755: // node may still be cached via non-primary keys. mark as invalid
0756: node.setState(Node.INVALID);
0757: }
0758:
0759: /**
0760: * Generate a new ID for a given type, delegating to our IDGenerator if set.
0761: */
0762: public String generateID(DbMapping map) throws Exception {
0763: if (idgen != null) {
0764: // use our custom IDGenerator
0765: return idgen.generateID(map);
0766: } else {
0767: return doGenerateID(map);
0768: }
0769: }
0770:
0771: /**
0772: * Actually generates an ID, using a method matching the given DbMapping.
0773: */
0774: public String doGenerateID(DbMapping map) throws Exception {
0775: if ((map == null) || !map.isRelational()) {
0776: // use embedded db id generator
0777: return generateEmbeddedID(map);
0778: }
0779: String idMethod = map.getIDgen();
0780: if (idMethod == null || "[max]".equalsIgnoreCase(idMethod)
0781: || map.isMySQL()) {
0782: // use select max as id generator
0783: return generateMaxID(map);
0784: } else if ("[hop]".equalsIgnoreCase(idMethod)) {
0785: // use embedded db id generator
0786: return generateEmbeddedID(map);
0787: } else {
0788: // use db sequence as id generator
0789: return generateSequenceID(map);
0790: }
0791: }
0792:
    /**
     * Generates an ID for use with the embedded database.
     *
     * @param map the mapping the id is generated for; unused here, as ids
     *            come from the embedded database's counter
     * @return the next id as a string
     * @throws Exception if the embedded database fails to produce an id
     */
    synchronized String generateEmbeddedID(DbMapping map)
            throws Exception {
        return db.nextID();
    }
0800:
    /**
     * Generates an ID for the table by finding out the maximum current value
     * of the id column and passing it through DbMapping.getNewID().
     *
     * @param map the relational mapping to generate an id for
     * @return the new id as a string
     * @throws Exception if the SELECT MAX query fails
     */
    synchronized String generateMaxID(DbMapping map) throws Exception {
        String retval = null;
        Statement stmt = null;
        long logTimeStart = logSql ? System.currentTimeMillis() : 0;
        String q = new StringBuffer("SELECT MAX(").append(
                map.getIDField()).append(") FROM ").append(
                map.getTableName()).toString();

        try {
            Connection con = map.getConnection();
            // set connection to read-only mode
            if (!con.isReadOnly())
                con.setReadOnly(true);

            stmt = con.createStatement();

            ResultSet rs = stmt.executeQuery(q);

            // check for empty table
            if (!rs.next()) {
                long currMax = map.getNewID(0);

                retval = Long.toString(currMax);
            } else {
                // NOTE(review): for an empty table MAX() typically returns a
                // single row with SQL NULL, which getLong() reports as 0
                long currMax = rs.getLong(1);

                currMax = map.getNewID(currMax);
                retval = Long.toString(currMax);
            }
        } finally {
            if (logSql) {
                long logTimeStop = System.currentTimeMillis();
                logSqlStatement("SQL SELECT_MAX", map.getTableName(),
                        logTimeStart, logTimeStop, q);
            }
            // the ResultSet is released implicitly when the statement closes
            if (stmt != null) {
                try {
                    stmt.close();
                } catch (Exception ignore) {
                }
            }
        }

        return retval;
    }
0849:
    /**
     * Generates an ID by fetching the next value of a database sequence named
     * by the mapping's idgen setting. Supported for Oracle, PostgreSQL and H2;
     * any other database causes a RuntimeException.
     *
     * @param map the relational mapping whose idgen names the sequence
     * @return the next sequence value as a string
     * @throws Exception if the sequence query fails or the DB is unsupported
     */
    String generateSequenceID(DbMapping map) throws Exception {
        Statement stmt = null;
        String retval = null;
        long logTimeStart = logSql ? System.currentTimeMillis() : 0;
        String q;
        if (map.isOracle()) {
            q = new StringBuffer("SELECT ").append(map.getIDgen())
                    .append(".nextval FROM dual").toString();
        } else if (map.isPostgreSQL() || map.isH2()) {
            q = new StringBuffer("SELECT nextval('").append(
                    map.getIDgen()).append("')").toString();
        } else {
            throw new RuntimeException(
                    "Unable to generate sequence: unknown DB");
        }

        try {
            Connection con = map.getConnection();
            // TODO is it necessary to set connection to write mode here?
            if (con.isReadOnly())
                con.setReadOnly(false);

            stmt = con.createStatement();

            ResultSet rs = stmt.executeQuery(q);

            if (!rs.next()) {
                throw new SQLException(
                        "Error creating ID from Sequence: empty recordset");
            }

            retval = rs.getString(1);
        } finally {
            if (logSql) {
                long logTimeStop = System.currentTimeMillis();
                logSqlStatement("SQL SELECT_NEXTVAL", map
                        .getTableName(), logTimeStart, logTimeStop, q);
            }
            if (stmt != null) {
                try {
                    stmt.close();
                } catch (Exception ignore) {
                }
            }
        }

        return retval;
    }
0898:
    /**
     * Loads subnodes via subnode relation. Only the ID index is loaded, the nodes are
     * loaded later on demand.
     *
     * @param home the node whose subnode ids are loaded
     * @param rel the subnode relation; must be relational
     * @return the list of subnode handles
     * @throws Exception if the query fails
     */
    public SubnodeList getNodeIDs(Node home, Relation rel)
            throws Exception {

        if ((rel == null) || (rel.otherType == null)
                || !rel.otherType.isRelational()) {
            // this should never be called for embedded nodes
            throw new RuntimeException(
                    "NodeMgr.getNodeIDs called for non-relational node "
                            + home);
        } else {
            SubnodeList retval = home.createSubnodeList();

            // if we do a groupby query (creating an intermediate layer of groupby nodes),
            // retrieve the value of that field instead of the primary key
            String idfield = (rel.groupby == null) ? rel.otherType
                    .getIDField() : rel.groupby;
            Connection con = rel.otherType.getConnection();
            // set connection to read-only mode
            if (!con.isReadOnly())
                con.setReadOnly(true);

            String table = rel.otherType.getTableName();

            Statement stmt = null;
            long logTimeStart = logSql ? System.currentTimeMillis() : 0;
            String query = null;

            try {
                StringBuffer b = new StringBuffer("SELECT ");

                if (rel.queryHints != null) {
                    b.append(rel.queryHints).append(" ");
                }

                // qualify plain column names with the table name; skip
                // expressions (containing '(') and already-qualified names
                if (idfield.indexOf('(') == -1
                        && idfield.indexOf('.') == -1) {
                    b.append(table).append('.');
                }
                b.append(idfield).append(" FROM ").append(table);

                rel.appendAdditionalTables(b);

                if (home.getSubnodeRelation() != null) {
                    // subnode relation was explicitly set
                    query = b.append(" ").append(
                            home.getSubnodeRelation()).toString();
                } else {
                    // let relation object build the query
                    query = b.append(
                            rel.buildQuery(home, home
                                    .getNonVirtualParent(), null,
                                    " WHERE ", true)).toString();
                }

                stmt = con.createStatement();

                if (rel.maxSize > 0) {
                    stmt.setMaxRows(rel.maxSize);
                }

                ResultSet result = stmt.executeQuery(query);

                // problem: how do we derive a SyntheticKey from a not-yet-persistent Node?
                Key k = (rel.groupby != null) ? home.getKey() : null;

                while (result.next()) {
                    String kstr = result.getString(1);

                    // jump over null values - this can happen especially when the selected
                    // column is a group-by column.
                    if (kstr == null) {
                        continue;
                    }

                    // make the proper key for the object, either a generic DB key or a groupby key
                    Key key = (rel.groupby == null) ? (Key) new DbKey(
                            rel.otherType, kstr)
                            : (Key) new SyntheticKey(k, kstr);
                    retval.addSorted(new NodeHandle(key));

                    // if these are groupby nodes, evict nullNode keys
                    if (rel.groupby != null) {
                        Node n = (Node) cache.get(key);

                        if ((n != null) && n.isNullNode()) {
                            evictKey(key);
                        }
                    }
                }
            } finally {
                if (logSql) {
                    long logTimeStop = System.currentTimeMillis();
                    logSqlStatement("SQL SELECT_IDS", table,
                            logTimeStart, logTimeStop, query);
                }
                if (stmt != null) {
                    try {
                        stmt.close();
                    } catch (Exception ignore) {
                    }
                }
            }

            return retval;
        }
    }
1009:
1010: /**
1011: * Loades subnodes via subnode relation. This is similar to getNodeIDs, but it
1012: * actually loades all nodes in one go, which is better for small node collections.
1013: * This method is used when xxx.loadmode=aggressive is specified.
1014: */
1015: public SubnodeList getNodes(Node home, Relation rel)
1016: throws Exception {
1017: // This does not apply for groupby nodes - use getNodeIDs instead
1018: if (rel.groupby != null) {
1019: return getNodeIDs(home, rel);
1020: }
1021:
1022: if ((rel == null) || (rel.otherType == null)
1023: || !rel.otherType.isRelational()) {
1024: // this should never be called for embedded nodes
1025: throw new RuntimeException(
1026: "NodeMgr.getNodes called for non-relational node "
1027: + home);
1028: } else {
1029: SubnodeList retval = home.createSubnodeList();
1030: DbMapping dbm = rel.otherType;
1031:
1032: Connection con = dbm.getConnection();
1033: // set connection to read-only mode
1034: if (!con.isReadOnly())
1035: con.setReadOnly(true);
1036:
1037: Statement stmt = con.createStatement();
1038: DbColumn[] columns = dbm.getColumns();
1039: Relation[] joins = dbm.getJoins();
1040: String query = null;
1041: long logTimeStart = logSql ? System.currentTimeMillis() : 0;
1042:
1043: try {
1044: StringBuffer b = dbm.getSelect(rel);
1045:
1046: if (home.getSubnodeRelation() != null) {
1047: b.append(home.getSubnodeRelation());
1048: } else {
1049: // let relation object build the query
1050: b.append(rel.buildQuery(home, home
1051: .getNonVirtualParent(), null, " WHERE ",
1052: true));
1053: }
1054:
1055: query = b.toString();
1056:
1057: if (rel.maxSize > 0) {
1058: stmt.setMaxRows(rel.maxSize);
1059: }
1060:
1061: ResultSet rs = stmt.executeQuery(query);
1062:
1063: while (rs.next()) {
1064: // create new Nodes.
1065: Node node = createNode(rel.otherType, rs, columns,
1066: 0);
1067: if (node == null) {
1068: continue;
1069: }
1070: Key primKey = node.getKey();
1071:
1072: retval.addSorted(new NodeHandle(primKey));
1073:
1074: registerNewNode(node, null);
1075:
1076: fetchJoinedNodes(rs, joins, columns.length);
1077: }
1078:
1079: } finally {
1080: if (logSql) {
1081: long logTimeStop = System.currentTimeMillis();
1082: logSqlStatement("SQL SELECT_ALL", dbm
1083: .getTableName(), logTimeStart, logTimeStop,
1084: query);
1085: }
1086: if (stmt != null) {
1087: try {
1088: stmt.close();
1089: } catch (Exception ignore) {
1090: }
1091: }
1092: }
1093:
1094: return retval;
1095: }
1096: }
1097:
1098: /**
1099: * Update a UpdateableSubnodeList retrieving all values having
1100: * higher Values according to the updateCriteria's set for this Collection's Relation
1101: * The returned Map-Object has two Properties:
1102: * addedNodes = an Integer representing the number of Nodes added to this collection
1103: * newNodes = an Integer representing the number of Records returned by the Select-Statement
1104: * These two values may be different if a max-size is defined for this Collection and a new
1105: * node would be outside of this Border because of the ordering of this collection.
1106: * @param home the home of this subnode-list
1107: * @param rel the relation the home-node has to the nodes contained inside the subnodelist
1108: * @return A map having two properties of type String (newNodes (number of nodes retreived by the select-statment), addedNodes (nodes added to the collection))
1109: * @throws Exception
1110: */
1111: public int updateSubnodeList(Node home, Relation rel)
1112: throws Exception {
1113: if ((rel == null) || (rel.otherType == null)
1114: || !rel.otherType.isRelational()) {
1115: // this should never be called for embedded nodes
1116: throw new RuntimeException(
1117: "NodeMgr.updateSubnodeList called for non-relational node "
1118: + home);
1119: } else {
1120: List list = home.getSubnodeList();
1121: if (list == null)
1122: list = home.createSubnodeList();
1123:
1124: if (!(list instanceof UpdateableSubnodeList))
1125: throw new RuntimeException(
1126: "unable to update SubnodeList not marked as updateable ("
1127: + rel.propName + ")");
1128:
1129: UpdateableSubnodeList sublist = (UpdateableSubnodeList) list;
1130:
1131: // FIXME: grouped subnodes aren't supported yet
1132: if (rel.groupby != null)
1133: throw new RuntimeException(
1134: "update not yet supported on grouped collections");
1135:
1136: String idfield = rel.otherType.getIDField();
1137: Connection con = rel.otherType.getConnection();
1138: String table = rel.otherType.getTableName();
1139:
1140: Statement stmt = null;
1141:
1142: try {
1143: String q = null;
1144:
1145: StringBuffer b = new StringBuffer();
1146: if (rel.loadAggressively()) {
1147: b.append(rel.otherType.getSelect(rel));
1148: } else {
1149: b.append("SELECT ");
1150: if (rel.queryHints != null) {
1151: b.append(rel.queryHints).append(" ");
1152: }
1153: b.append(table).append('.').append(idfield).append(
1154: " FROM ").append(table);
1155:
1156: rel.appendAdditionalTables(b);
1157: }
1158: String updateCriteria = sublist.getUpdateCriteria();
1159: if (home.getSubnodeRelation() != null) {
1160: if (updateCriteria != null) {
1161: b.append(" WHERE ");
1162: b.append(sublist.getUpdateCriteria());
1163: b.append(" AND ");
1164: b.append(home.getSubnodeRelation());
1165: } else {
1166: b.append(" WHERE ");
1167: b.append(home.getSubnodeRelation());
1168: }
1169: } else {
1170: if (updateCriteria != null) {
1171: b.append(" WHERE ");
1172: b.append(updateCriteria);
1173: b.append(rel.buildQuery(home, home
1174: .getNonVirtualParent(), null, " AND ",
1175: true));
1176: } else {
1177: b.append(rel.buildQuery(home, home
1178: .getNonVirtualParent(), null,
1179: " WHERE ", true));
1180: }
1181: q = b.toString();
1182: }
1183:
1184: long logTimeStart = logSql ? System.currentTimeMillis()
1185: : 0;
1186:
1187: stmt = con.createStatement();
1188:
1189: if (rel.maxSize > 0) {
1190: stmt.setMaxRows(rel.maxSize);
1191: }
1192:
1193: ResultSet result = stmt.executeQuery(q);
1194:
1195: if (logSql) {
1196: long logTimeStop = System.currentTimeMillis();
1197: logSqlStatement("SQL SELECT_UPDATE_SUBNODE_LIST",
1198: table, logTimeStart, logTimeStop, q);
1199: }
1200:
1201: // problem: how do we derive a SyntheticKey from a not-yet-persistent Node?
1202: // Key k = (rel.groupby != null) ? home.getKey() : null;
1203: // int cntr = 0;
1204:
1205: DbColumn[] columns = rel.loadAggressively() ? rel.otherType
1206: .getColumns()
1207: : null;
1208: List newNodes = new ArrayList(rel.maxSize);
1209: while (result.next()) {
1210: String kstr = result.getString(1);
1211:
1212: // jump over null values - this can happen especially when the selected
1213: // column is a group-by column.
1214: if (kstr == null) {
1215: continue;
1216: }
1217:
1218: // make the proper key for the object, either a generic DB key or a groupby key
1219: Key key;
1220: if (rel.loadAggressively()) {
1221: Node node = createNode(rel.otherType, result,
1222: columns, 0);
1223: if (node == null) {
1224: continue;
1225: }
1226: key = node.getKey();
1227: registerNewNode(node, null);
1228: } else {
1229: key = new DbKey(rel.otherType, kstr);
1230: }
1231: newNodes.add(new NodeHandle(key));
1232:
1233: // if these are groupby nodes, evict nullNode keys
1234: if (rel.groupby != null) {
1235: Node n = (Node) cache.get(key);
1236:
1237: if ((n != null) && n.isNullNode()) {
1238: evictKey(key);
1239: }
1240: }
1241: }
1242: // System.err.println("GOT NEW NODES: " + newNodes);
1243: if (!newNodes.isEmpty())
1244: sublist.addAll(newNodes);
1245: return newNodes.size();
1246: } finally {
1247: if (stmt != null) {
1248: try {
1249: stmt.close();
1250: } catch (Exception ignore) {
1251: }
1252: }
1253: }
1254: }
1255: }
1256:
1257: /**
1258: *
1259: */
1260: public void prefetchNodes(Node home, Relation rel, Key[] keys)
1261: throws Exception {
1262: DbMapping dbm = rel.otherType;
1263:
1264: // this does nothing for objects in the embedded database
1265: if (dbm != null && dbm.isRelational()) {
1266: int missing = cache.containsKeys(keys);
1267:
1268: if (missing > 0) {
1269: Connection con = dbm.getConnection();
1270: // set connection to read-only mode
1271: if (!con.isReadOnly())
1272: con.setReadOnly(true);
1273:
1274: Statement stmt = con.createStatement();
1275: DbColumn[] columns = dbm.getColumns();
1276: Relation[] joins = dbm.getJoins();
1277: String query = null;
1278: long logTimeStart = logSql ? System.currentTimeMillis()
1279: : 0;
1280:
1281: try {
1282: StringBuffer b = dbm.getSelect(null).append(
1283: " WHERE ");
1284: String idfield = (rel.groupby != null) ? rel.groupby
1285: : dbm.getIDField();
1286:
1287: String[] ids = new String[missing];
1288: int j = 0;
1289: for (int k = 0; k < keys.length; k++) {
1290: if (keys[k] != null)
1291: ids[j++] = keys[k].getID();
1292: }
1293:
1294: dbm.appendCondition(b, idfield, ids);
1295: dbm.addJoinConstraints(b, " AND ");
1296:
1297: if (rel.groupby != null) {
1298: rel.renderConstraints(b, home, home
1299: .getNonVirtualParent(), " AND ");
1300:
1301: if (rel.order != null) {
1302: b.append(" ORDER BY ");
1303: b.append(rel.order);
1304: }
1305: }
1306:
1307: query = b.toString();
1308:
1309: ResultSet rs = stmt.executeQuery(query);
1310:
1311: String groupbyProp = null;
1312: HashMap groupbySubnodes = null;
1313:
1314: if (rel.groupby != null) {
1315: groupbyProp = dbm
1316: .columnNameToProperty(rel.groupby);
1317: groupbySubnodes = new HashMap();
1318: }
1319:
1320: String accessProp = null;
1321:
1322: if ((rel.accessName != null)
1323: && !rel.usesPrimaryKey()) {
1324: accessProp = dbm
1325: .columnNameToProperty(rel.accessName);
1326: }
1327:
1328: while (rs.next()) {
1329: // create new Nodes.
1330: Node node = createNode(dbm, rs, columns, 0);
1331: if (node == null) {
1332: continue;
1333: }
1334: Key key = node.getKey();
1335: Key secondaryKey = null;
1336:
1337: // for grouped nodes, collect subnode lists for the intermediary
1338: // group nodes.
1339: String groupName = null;
1340:
1341: if (groupbyProp != null) {
1342: groupName = node.getString(groupbyProp);
1343:
1344: SubnodeList sn = (SubnodeList) groupbySubnodes
1345: .get(groupName);
1346:
1347: if (sn == null) {
1348: sn = new SubnodeList(safe, rel);
1349: groupbySubnodes.put(groupName, sn);
1350: }
1351:
1352: sn.addSorted(new NodeHandle(key));
1353: }
1354:
1355: // if relation doesn't use primary key as accessName, get secondary key
1356: if (accessProp != null) {
1357: String accessName = node
1358: .getString(accessProp);
1359: if (accessName != null) {
1360: if (groupName == null) {
1361: secondaryKey = new SyntheticKey(
1362: home.getKey(), accessName);
1363: } else {
1364: Key groupKey = new SyntheticKey(
1365: home.getKey(), groupName);
1366: secondaryKey = new SyntheticKey(
1367: groupKey, accessName);
1368: }
1369: }
1370:
1371: }
1372:
1373: // register new nodes with the cache. If an up-to-date copy
1374: // existed in the cache, use that.
1375: registerNewNode(node, secondaryKey);
1376:
1377: fetchJoinedNodes(rs, joins, columns.length);
1378: }
1379:
1380: // If these are grouped nodes, build the intermediary group nodes
1381: // with the subnod lists we created
1382: if (groupbyProp != null) {
1383: for (Iterator i = groupbySubnodes.keySet()
1384: .iterator(); i.hasNext();) {
1385: String groupname = (String) i.next();
1386:
1387: if (groupname == null) {
1388: continue;
1389: }
1390:
1391: Node groupnode = home.getGroupbySubnode(
1392: groupname, true);
1393:
1394: groupnode
1395: .setSubnodes((SubnodeList) groupbySubnodes
1396: .get(groupname));
1397: groupnode.lastSubnodeFetch = groupnode
1398: .getLastSubnodeChange(groupnode.dbmap
1399: .getSubnodeRelation());
1400: }
1401: }
1402: } catch (Exception x) {
1403: app.logError("Error in prefetchNodes()", x);
1404: } finally {
1405: if (logSql) {
1406: long logTimeStop = System.currentTimeMillis();
1407: logSqlStatement("SQL SELECT_PREFETCH", dbm
1408: .getTableName(), logTimeStart,
1409: logTimeStop, query);
1410: }
1411: if (stmt != null) {
1412: try {
1413: stmt.close();
1414: } catch (Exception ignore) {
1415: }
1416: }
1417: }
1418: }
1419: }
1420: }
1421:
1422: /**
1423: * Count the nodes contained in the child collection of the home node
1424: * which is defined by Relation rel.
1425: */
1426: public int countNodes(Node home, Relation rel) throws Exception {
1427: if ((rel == null) || (rel.otherType == null)
1428: || !rel.otherType.isRelational()) {
1429: // this should never be called for embedded nodes
1430: throw new RuntimeException(
1431: "NodeMgr.countNodes called for non-relational node "
1432: + home);
1433: } else {
1434: int retval = 0;
1435: Connection con = rel.otherType.getConnection();
1436: // set connection to read-only mode
1437: if (!con.isReadOnly())
1438: con.setReadOnly(true);
1439:
1440: String table = rel.otherType.getTableName();
1441: Statement stmt = null;
1442: long logTimeStart = logSql ? System.currentTimeMillis() : 0;
1443: String query = null;
1444:
1445: try {
1446: StringBuffer tables = new StringBuffer(table);
1447:
1448: rel.appendAdditionalTables(tables);
1449:
1450: // NOTE: we explicitly convert tables StringBuffer to a String
1451: // before appending to be compatible with JDK 1.3
1452: StringBuffer b = new StringBuffer(
1453: "SELECT count(*) FROM ").append(tables
1454: .toString());
1455:
1456: if (home.getSubnodeRelation() != null) {
1457: // use the manually set subnoderelation of the home node
1458: query = b.append(" ").append(
1459: home.getSubnodeRelation()).toString();
1460: } else {
1461: // let relation object build the query
1462: query = b.append(
1463: rel.buildQuery(home, home
1464: .getNonVirtualParent(), null,
1465: " WHERE ", false)).toString();
1466: }
1467:
1468: stmt = con.createStatement();
1469:
1470: ResultSet rs = stmt.executeQuery(query);
1471:
1472: if (!rs.next()) {
1473: retval = 0;
1474: } else {
1475: retval = rs.getInt(1);
1476: }
1477: } finally {
1478: if (logSql) {
1479: long logTimeStop = System.currentTimeMillis();
1480: logSqlStatement("SQL SELECT_COUNT", table,
1481: logTimeStart, logTimeStop, query);
1482: }
1483: if (stmt != null) {
1484: try {
1485: stmt.close();
1486: } catch (Exception ignore) {
1487: }
1488: }
1489: }
1490:
1491: return (rel.maxSize > 0) ? Math.min(rel.maxSize, retval)
1492: : retval;
1493: }
1494: }
1495:
1496: /**
1497: * Similar to getNodeIDs, but returns a Vector that return's the nodes property names instead of IDs
1498: */
1499: public Vector getPropertyNames(Node home, Relation rel)
1500: throws Exception {
1501: if ((rel == null) || (rel.otherType == null)
1502: || !rel.otherType.isRelational()) {
1503: // this should never be called for embedded nodes
1504: throw new RuntimeException(
1505: "NodeMgr.getPropertyNames called for non-relational node "
1506: + home);
1507: } else {
1508: Vector retval = new Vector();
1509:
1510: // if we do a groupby query (creating an intermediate layer of groupby nodes),
1511: // retrieve the value of that field instead of the primary key
1512: String namefield = (rel.groupby == null) ? rel.accessName
1513: : rel.groupby;
1514: Connection con = rel.otherType.getConnection();
1515: // set connection to read-only mode
1516: if (!con.isReadOnly())
1517: con.setReadOnly(true);
1518:
1519: String table = rel.otherType.getTableName();
1520: StringBuffer tables = new StringBuffer(table);
1521: rel.appendAdditionalTables(tables);
1522:
1523: Statement stmt = null;
1524: long logTimeStart = logSql ? System.currentTimeMillis() : 0;
1525: String query = null;
1526:
1527: try {
1528: // NOTE: we explicitly convert tables StringBuffer to a String
1529: // before appending to be compatible with JDK 1.3
1530: StringBuffer b = new StringBuffer("SELECT ").append(
1531: namefield).append(" FROM ").append(
1532: tables.toString());
1533:
1534: if (home.getSubnodeRelation() != null) {
1535: b.append(" ").append(home.getSubnodeRelation());
1536: } else {
1537: // let relation object build the query
1538: b.append(rel.buildQuery(home, home
1539: .getNonVirtualParent(), null, " WHERE ",
1540: true));
1541: }
1542:
1543: stmt = con.createStatement();
1544:
1545: query = b.toString();
1546:
1547: ResultSet rs = stmt.executeQuery(query);
1548:
1549: while (rs.next()) {
1550: String n = rs.getString(1);
1551:
1552: if (n != null) {
1553: retval.addElement(n);
1554: }
1555: }
1556: } finally {
1557: if (logSql) {
1558: long logTimeStop = System.currentTimeMillis();
1559: logSqlStatement("SQL SELECT_ACCESSNAMES", table,
1560: logTimeStart, logTimeStop, query);
1561: }
1562:
1563: if (stmt != null) {
1564: try {
1565: stmt.close();
1566: } catch (Exception ignore) {
1567: }
1568: }
1569: }
1570:
1571: return retval;
1572: }
1573: }
1574:
1575: ///////////////////////////////////////////////////////////////////////////////////////
1576: // private getNode methods
1577: ///////////////////////////////////////////////////////////////////////////////////////
1578: private Node getNodeByKey(ITransaction txn, DbKey key)
1579: throws Exception {
1580: // Note: Key must be a DbKey, otherwise will not work for relational objects
1581: Node node = null;
1582: DbMapping dbm = app.getDbMapping(key.getStorageName());
1583: String kstr = key.getID();
1584:
1585: if ((dbm == null) || !dbm.isRelational()) {
1586: node = (Node) db.getNode(txn, kstr);
1587: node.nmgr = safe;
1588:
1589: if ((node != null) && (dbm != null)) {
1590: node.setDbMapping(dbm);
1591: }
1592: } else {
1593: String idfield = dbm.getIDField();
1594:
1595: Statement stmt = null;
1596: String query = null;
1597: long logTimeStart = logSql ? System.currentTimeMillis() : 0;
1598:
1599: try {
1600: Connection con = dbm.getConnection();
1601: // set connection to read-only mode
1602: if (!con.isReadOnly())
1603: con.setReadOnly(true);
1604:
1605: stmt = con.createStatement();
1606:
1607: DbColumn[] columns = dbm.getColumns();
1608: Relation[] joins = dbm.getJoins();
1609:
1610: StringBuffer b = dbm.getSelect(null).append("WHERE ");
1611: dbm.appendCondition(b, idfield, kstr);
1612: dbm.addJoinConstraints(b, " AND ");
1613: query = b.toString();
1614:
1615: ResultSet rs = stmt.executeQuery(query);
1616:
1617: if (!rs.next()) {
1618: return null;
1619: }
1620: node = createNode(dbm, rs, columns, 0);
1621:
1622: fetchJoinedNodes(rs, joins, columns.length);
1623:
1624: if (rs.next()) {
1625: app
1626: .logError("Warning: More than one value returned for query "
1627: + query);
1628: }
1629: } finally {
1630: if (logSql) {
1631: long logTimeStop = System.currentTimeMillis();
1632: logSqlStatement("SQL SELECT_BYKEY", dbm
1633: .getTableName(), logTimeStart, logTimeStop,
1634: query);
1635: }
1636: if (stmt != null) {
1637: try {
1638: stmt.close();
1639: } catch (Exception ignore) {
1640: // ignore
1641: }
1642: }
1643: }
1644: }
1645:
1646: return node;
1647: }
1648:
    /**
     * Fetch a single child node of the home node through the given relation,
     * identified by the access-name/groupby value kstr. Depending on the
     * relation this may create a virtual collection node, look up a groupby
     * node, read from the embedded database, or run a relational query.
     *
     * @param txn the current transaction (used for embedded database reads)
     * @param home the parent node
     * @param kstr the access name or groupby value identifying the child
     * @param rel the relation from home to the child, may be null
     * @param dbm the child's DbMapping, may be null
     * @return the node, or null if no matching object exists
     */
    private Node getNodeByRelation(ITransaction txn, Node home,
            String kstr, Relation rel, DbMapping dbm) throws Exception {
        Node node = null;

        if ((rel != null) && rel.virtual) {
            // virtual (collection) node: create it on the fly
            if (rel.needsPersistence()) {
                node = (Node) home.createNode(kstr);
            } else {
                node = new Node(home, kstr, safe, rel.prototype);
            }

            // set prototype and dbmapping on the newly created virtual/collection node
            node.setPrototype(rel.prototype);
            node.setDbMapping(rel.getVirtualMapping());
        } else if (rel != null && rel.groupby != null) {
            // grouped collection: look up the intermediate group node
            node = home.getGroupbySubnode(kstr, false);

            if (node == null && (dbm == null || !dbm.isRelational())) {
                // NOTE(review): node.nmgr is set without a null check here,
                // unlike getNodeByKey - presumably db.getNode() never
                // returns null on this path; confirm before relying on it
                node = (Node) db.getNode(txn, kstr);
                node.nmgr = safe;
            }

            return node;
        } else if (rel == null || dbm == null || !dbm.isRelational()) {
            // embedded database lookup by id
            node = (Node) db.getNode(txn, kstr);
            node.nmgr = safe;
            node.setDbMapping(dbm);

            return node;
        } else {
            // relational lookup: build and run a SELECT for the child row
            Statement stmt = null;
            String query = null;
            long logTimeStart = logSql ? System.currentTimeMillis() : 0;

            try {
                Connection con = dbm.getConnection();
                // set connection to read-only mode
                if (!con.isReadOnly())
                    con.setReadOnly(true);
                DbColumn[] columns = dbm.getColumns();
                Relation[] joins = dbm.getJoins();
                StringBuffer b = dbm.getSelect(rel);

                if (home.getSubnodeRelation() != null
                        && !rel.isComplexReference()) {
                    // combine our key with the constraints in the manually set subnode relation
                    b.append(" WHERE ");
                    dbm.appendCondition(b, rel.accessName, kstr);
                    // add join contraints in case this is an old oracle style join
                    dbm.addJoinConstraints(b, " AND ");
                    // add potential constraints from manually set subnodeRelation
                    String subrel = home.getSubnodeRelation().trim();
                    if (subrel.length() > 5) {
                        // NOTE(review): substring(5) presumably strips a
                        // leading "WHERE" from the manual relation - confirm
                        b.append(" AND (");
                        b.append(subrel.substring(5).trim());
                        b.append(")");
                    }
                } else {
                    b.append(rel.buildQuery(home, home
                            .getNonVirtualParent(), dbm, kstr,
                            " WHERE ", false));
                }

                stmt = con.createStatement();

                query = b.toString();

                ResultSet rs = stmt.executeQuery(query);

                if (!rs.next()) {
                    // no matching row
                    return null;
                }

                node = createNode(dbm, rs, columns, 0);

                // also materialize nodes from joined tables in this row
                fetchJoinedNodes(rs, joins, columns.length);

                if (rs.next()) {
                    app
                            .logError("Warning: More than one value returned for query "
                                    + query);
                }

            } finally {
                if (logSql) {
                    long logTimeStop = System.currentTimeMillis();
                    logSqlStatement("SQL SELECT_BYRELATION", dbm
                            .getTableName(), logTimeStart, logTimeStop,
                            query);
                }
                if (stmt != null) {
                    try {
                        stmt.close();
                    } catch (Exception ignore) {
                        // ignore
                    }
                }
            }
        }

        return node;
    }
1751:
1752: /**
1753: * Create a new Node from a ResultSet.
1754: */
1755: public Node createNode(DbMapping dbm, ResultSet rs,
1756: DbColumn[] columns, int offset) throws SQLException,
1757: IOException, ClassNotFoundException {
1758: HashMap propBuffer = new HashMap();
1759: String id = null;
1760: String name = null;
1761: String protoName = dbm.getTypeName();
1762: DbMapping dbmap = dbm;
1763:
1764: Node node = new Node();
1765:
1766: for (int i = 0; i < columns.length; i++) {
1767:
1768: int columnNumber = i + 1 + offset;
1769:
1770: // set prototype?
1771: if (columns[i].isPrototypeField()) {
1772: String protoId = rs.getString(columnNumber);
1773: protoName = dbm.getPrototypeName(protoId);
1774:
1775: if (protoName != null) {
1776: dbmap = getDbMapping(protoName);
1777:
1778: if (dbmap == null) {
1779: // invalid prototype name!
1780: app
1781: .logError("No prototype defined for prototype mapping \""
1782: + protoName
1783: + "\" - Using default prototype \""
1784: + dbm.getTypeName() + "\".");
1785: dbmap = dbm;
1786: protoName = dbmap.getTypeName();
1787: }
1788: }
1789: }
1790:
1791: // set id?
1792: if (columns[i].isIdField()) {
1793: id = rs.getString(columnNumber);
1794: // if id == null, the object doesn't actually exist - return null
1795: if (id == null) {
1796: return null;
1797: }
1798: }
1799:
1800: // set name?
1801: if (columns[i].isNameField()) {
1802: name = rs.getString(columnNumber);
1803: }
1804:
1805: Property newprop = new Property(node);
1806:
1807: switch (columns[i].getType()) {
1808: case Types.BIT:
1809: case Types.BOOLEAN:
1810: newprop.setBooleanValue(rs.getBoolean(columnNumber));
1811:
1812: break;
1813:
1814: case Types.TINYINT:
1815: case Types.BIGINT:
1816: case Types.SMALLINT:
1817: case Types.INTEGER:
1818: newprop.setIntegerValue(rs.getLong(columnNumber));
1819:
1820: break;
1821:
1822: case Types.REAL:
1823: case Types.FLOAT:
1824: case Types.DOUBLE:
1825: newprop.setFloatValue(rs.getDouble(columnNumber));
1826:
1827: break;
1828:
1829: case Types.DECIMAL:
1830: case Types.NUMERIC:
1831:
1832: BigDecimal num = rs.getBigDecimal(columnNumber);
1833: if (num == null) {
1834: break;
1835: }
1836: if (num.scale() > 0) {
1837: newprop.setFloatValue(num.doubleValue());
1838: } else {
1839: newprop.setIntegerValue(num.longValue());
1840: }
1841:
1842: break;
1843:
1844: case Types.VARBINARY:
1845: case Types.BINARY:
1846: newprop.setJavaObjectValue(rs.getBytes(columnNumber));
1847:
1848: break;
1849:
1850: case Types.BLOB:
1851: case Types.LONGVARBINARY: {
1852: InputStream in = rs.getBinaryStream(columnNumber);
1853: if (in == null) {
1854: break;
1855: }
1856: ByteArrayOutputStream bout = new ByteArrayOutputStream();
1857: byte[] buffer = new byte[2048];
1858: int read;
1859: while ((read = in.read(buffer)) > -1) {
1860: bout.write(buffer, 0, read);
1861: }
1862: newprop.setJavaObjectValue(bout.toByteArray());
1863: }
1864:
1865: break;
1866:
1867: case Types.LONGVARCHAR:
1868: try {
1869: newprop.setStringValue(rs.getString(columnNumber));
1870: } catch (SQLException x) {
1871: Reader in = rs.getCharacterStream(columnNumber);
1872: if (in == null) {
1873: newprop.setStringValue(null);
1874: break;
1875: }
1876: StringBuffer out = new StringBuffer();
1877: char[] buffer = new char[2048];
1878: int read;
1879: while ((read = in.read(buffer)) > -1) {
1880: out.append(buffer, 0, read);
1881: }
1882: newprop.setStringValue(out.toString());
1883: }
1884:
1885: break;
1886:
1887: case Types.CHAR:
1888: case Types.VARCHAR:
1889: case Types.OTHER:
1890: newprop.setStringValue(rs.getString(columnNumber));
1891:
1892: break;
1893:
1894: case Types.DATE:
1895: case Types.TIME:
1896: case Types.TIMESTAMP:
1897: newprop.setDateValue(rs.getTimestamp(columnNumber));
1898:
1899: break;
1900:
1901: case Types.NULL:
1902: newprop.setStringValue(null);
1903:
1904: break;
1905:
1906: case Types.CLOB:
1907: Clob cl = rs.getClob(columnNumber);
1908: if (cl == null) {
1909: newprop.setStringValue(null);
1910: break;
1911: }
1912: char[] c = new char[(int) cl.length()];
1913: Reader isr = cl.getCharacterStream();
1914: isr.read(c);
1915: newprop.setStringValue(String.copyValueOf(c));
1916: break;
1917:
1918: default:
1919: newprop.setStringValue(rs.getString(columnNumber));
1920:
1921: break;
1922: }
1923:
1924: if (rs.wasNull()) {
1925: newprop.setStringValue(null);
1926: }
1927:
1928: propBuffer.put(columns[i].getName(), newprop);
1929:
1930: // mark property as clean, since it's fresh from the db
1931: newprop.dirty = false;
1932: }
1933:
1934: if (id == null) {
1935: return null;
1936: } else if (Thread.currentThread() instanceof Transactor) {
1937: // Check if the node is already registered with the transactor -
1938: // it may be in the process of being DELETED, but do return the
1939: // new node if the old one has been marked as INVALID.
1940: DbKey key = new DbKey(dbmap, id);
1941: Node dirtyNode = ((Transactor) Thread.currentThread())
1942: .getDirtyNode(key);
1943: if (dirtyNode != null
1944: && dirtyNode.getState() != Node.INVALID) {
1945: return dirtyNode;
1946: }
1947: }
1948:
1949: Hashtable propMap = new Hashtable();
1950: DbColumn[] columns2 = dbmap.getColumns();
1951: for (int i = 0; i < columns2.length; i++) {
1952: Relation rel = columns2[i].getRelation();
1953: if (rel != null && rel.isPrimitiveOrReference()) {
1954: Property prop = (Property) propBuffer.get(columns2[i]
1955: .getName());
1956:
1957: if (prop == null) {
1958: continue;
1959: }
1960:
1961: prop.setName(rel.propName);
1962:
1963: // if the property is a pointer to another node, change the property type to NODE
1964: if (rel.isReference() && rel.usesPrimaryKey()) {
1965: // FIXME: References to anything other than the primary key are not supported
1966: prop.convertToNodeReference(rel);
1967: }
1968: propMap.put(rel.propName.toLowerCase(), prop);
1969: }
1970: }
1971:
1972: node.init(dbmap, id, name, protoName, propMap, safe);
1973: return node;
1974: }
1975:
1976: /**
1977: * Fetch nodes that are fetched additionally to another node via join.
1978: */
1979: private void fetchJoinedNodes(ResultSet rs, Relation[] joins,
1980: int offset) throws ClassNotFoundException, SQLException,
1981: IOException {
1982: int resultSetOffset = offset;
1983: // create joined objects
1984: for (int i = 0; i < joins.length; i++) {
1985: DbMapping jdbm = joins[i].otherType;
1986: Node node = createNode(jdbm, rs, jdbm.getColumns(),
1987: resultSetOffset);
1988: if (node != null) {
1989: registerNewNode(node, null);
1990: }
1991: resultSetOffset += jdbm.getColumns().length;
1992: }
1993: }
1994:
1995: /**
1996: * Get a DbMapping for a given prototype name. This is just a proxy
1997: * method to the app's getDbMapping() method.
1998: */
1999: public DbMapping getDbMapping(String protoname) {
2000: return app.getDbMapping(protoname);
2001: }
2002:
2003: /**
2004: * Get an array of the the keys currently held in the object cache
2005: */
2006: public Object[] getCacheEntries() {
2007: return cache.getCachedObjects();
2008: }
2009:
2010: /**
2011: * Get the number of elements in the object cache
2012: */
2013: public int countCacheEntries() {
2014: return cache.size();
2015: }
2016:
2017: /**
2018: * Clear the object cache, causing all objects to be recreated.
2019: */
2020: public void clearCache() {
2021: synchronized (cache) {
2022: cache.clear();
2023: }
2024: }
2025:
2026: /**
2027: * Add a listener that is notified each time a transaction commits
2028: * that adds, modifies or deletes any Nodes.
2029: */
2030: public void addNodeChangeListener(NodeChangeListener listener) {
2031: listeners.add(listener);
2032: }
2033:
2034: /**
2035: * Remove a previously added NodeChangeListener.
2036: */
2037: public void removeNodeChangeListener(NodeChangeListener listener) {
2038: listeners.remove(listener);
2039: }
2040:
2041: /**
2042: * Let transactors know if they should collect and fire NodeChangeListener
2043: * events
2044: */
2045: protected boolean hasNodeChangeListeners() {
2046: return listeners.size() > 0;
2047: }
2048:
2049: /**
2050: * Called by transactors after committing.
2051: */
2052: protected void fireNodeChangeEvent(List inserted, List updated,
2053: List deleted, List parents) {
2054: int l = listeners.size();
2055:
2056: for (int i = 0; i < l; i++) {
2057: try {
2058: ((NodeChangeListener) listeners.get(i)).nodesChanged(
2059: inserted, updated, deleted, parents);
2060: } catch (Error e) {
2061: e.printStackTrace();
2062: } catch (Exception e) {
2063: e.printStackTrace();
2064: }
2065: }
2066: }
2067:
2068: /**
2069: * Receive notification from a remote app that objects in its cache have been
2070: * modified.
2071: */
2072: public void replicateCache(Vector add, Vector delete) {
2073: if (logReplication) {
2074: app.logEvent("Received cache replication event: "
2075: + add.size() + " added, " + delete.size()
2076: + " deleted");
2077: }
2078:
2079: synchronized (cache) {
2080: // long now = System.currentTimeMillis();
2081:
2082: for (Enumeration en = add.elements(); en.hasMoreElements();) {
2083: Node n = (Node) en.nextElement();
2084: DbMapping dbm = app.getDbMapping(n.getPrototype());
2085:
2086: if (dbm != null) {
2087: dbm.setLastDataChange();
2088: }
2089:
2090: n.setDbMapping(dbm);
2091: n.nmgr = safe;
2092:
2093: if (dbm != null && dbm.evictOnReplication()) {
2094: Node oldNode = (Node) cache.get(n.getKey());
2095:
2096: if (oldNode != null) {
2097: evictNode(oldNode);
2098: }
2099: } else {
2100: n.lastParentSet = -1;
2101: cache.put(n.getKey(), n);
2102: }
2103: }
2104:
2105: for (Enumeration en = delete.elements(); en
2106: .hasMoreElements();) {
2107: // NOTE: it would be more efficient to transfer just the keys
2108: // of nodes that are to be deleted.
2109: Node n = (Node) en.nextElement();
2110: DbMapping dbm = app.getDbMapping(n.getPrototype());
2111:
2112: if (dbm != null) {
2113: dbm.setLastDataChange();
2114: }
2115:
2116: n.setDbMapping(dbm);
2117: n.nmgr = safe;
2118:
2119: Node oldNode = (Node) cache.get(n.getKey());
2120:
2121: if (oldNode != null) {
2122: evictNode(oldNode);
2123: }
2124: }
2125: }
2126: }
2127:
2128: private void setStatementValue(PreparedStatement stmt,
2129: int columnNumber, String value, DbColumn col)
2130: throws SQLException {
2131: if (value == null) {
2132: stmt.setNull(columnNumber, col.getType());
2133: } else if (col.needsQuotes()) {
2134: stmt.setString(columnNumber, value);
2135: } else {
2136: stmt.setLong(columnNumber, Long.parseLong(value));
2137: }
2138: }
2139:
    /**
     * Bind a Property value to a prepared statement parameter, converting
     * it to the Java type matching the target SQL column type. Null
     * property values are bound as SQL NULL.
     *
     * @param stmt the statement to bind into
     * @param stmtNumber the 1-based parameter index
     * @param p the property supplying the value
     * @param columnType the java.sql.Types code of the target column
     * @throws SQLException if binding fails or a binary column receives a
     *         non-byte[] value
     */
    private void setStatementValue(PreparedStatement stmt,
            int stmtNumber, Property p, int columnType)
            throws SQLException {
        if (p.getValue() == null) {
            stmt.setNull(stmtNumber, columnType);
        } else {
            switch (columnType) {
            case Types.BIT:
            case Types.BOOLEAN:
                stmt.setBoolean(stmtNumber, p.getBooleanValue());

                break;

            case Types.TINYINT:
            case Types.BIGINT:
            case Types.SMALLINT:
            case Types.INTEGER:
                stmt.setLong(stmtNumber, p.getIntegerValue());

                break;

            case Types.REAL:
            case Types.FLOAT:
            case Types.DOUBLE:
            case Types.NUMERIC:
            case Types.DECIMAL:
                stmt.setDouble(stmtNumber, p.getFloatValue());

                break;

            case Types.LONGVARBINARY:
            case Types.VARBINARY:
            case Types.BINARY:
            case Types.BLOB:
                Object b = p.getJavaObjectValue();
                if (b instanceof byte[]) {
                    byte[] buf = (byte[]) b;
                    try {
                        stmt.setBytes(stmtNumber, buf);
                    } catch (SQLException x) {
                        // some drivers can't bind large arrays via
                        // setBytes() - fall back to stream binding
                        ByteArrayInputStream bout = new ByteArrayInputStream(
                                buf);
                        stmt.setBinaryStream(stmtNumber, bout,
                                buf.length);
                    }
                } else {
                    throw new SQLException(
                            "expected byte[] for binary column '"
                                    + p.getName() + "', found "
                                    + b.getClass());
                }

                break;

            case Types.LONGVARCHAR:
                try {
                    stmt.setString(stmtNumber, p.getStringValue());
                } catch (SQLException x) {
                    // some drivers can't bind long text via setString() -
                    // fall back to character-stream binding
                    String str = p.getStringValue();
                    Reader r = new StringReader(str);
                    stmt
                            .setCharacterStream(stmtNumber, r, str
                                    .length());
                }

                break;

            case Types.CLOB:
                String val = p.getStringValue();
                Reader isr = new StringReader(val);
                stmt.setCharacterStream(stmtNumber, isr, val.length());

                break;

            case Types.CHAR:
            case Types.VARCHAR:
            case Types.OTHER:
                stmt.setString(stmtNumber, p.getStringValue());

                break;

            case Types.DATE:
            case Types.TIME:
            case Types.TIMESTAMP:
                stmt.setTimestamp(stmtNumber, p.getTimestampValue());

                break;

            case Types.NULL:
                stmt.setNull(stmtNumber, 0);

                break;

            default:
                // unknown column type: bind the string representation
                stmt.setString(stmtNumber, p.getStringValue());

                break;
            }
        }
    }
2240:
2241: private void logSqlStatement(String type, String table,
2242: long logTimeStart, long logTimeStop, String statement) {
2243: // init sql-log if necessary
2244: if (sqlLog == null) {
2245: String sqlLogName = app.getProperty("sqlLog", "helma."
2246: + app.getName() + ".sql");
2247: sqlLog = LogFactory.getLog(sqlLogName);
2248: }
2249:
2250: sqlLog.info(new StringBuffer().append(type).append(" ").append(
2251: table).append(" ").append((logTimeStop - logTimeStart))
2252: .append(": ").append(statement).toString());
2253: }
2254: }
|