Schema script auto-application

Split NodeStatus transaction out into a shared entity and also record server info
Beginnings of changes required for index rebuilding, both full and incremental


git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@3654 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
This commit is contained in:
Derek Hulley
2006-09-01 17:06:07 +00:00
parent f77c66f906
commit e738ddfdf1
31 changed files with 1926 additions and 351 deletions

View File

@@ -60,7 +60,7 @@ public class PatchExecuter implements ApplicationListener
{
logger.info(I18NUtil.getMessage(MSG_CHECKING));
Date before = new Date(System.currentTimeMillis() - 20000L); // 20 seconds ago
Date before = new Date(System.currentTimeMillis() - 60000L); // 60 seconds ago
patchService.applyOutstandingPatches();
Date after = new Date(System .currentTimeMillis() + 20000L); // 20 seconds ahead

View File

@@ -20,7 +20,7 @@ import org.alfresco.repo.admin.patch.AbstractPatch;
import org.alfresco.service.cmr.admin.PatchException;
/**
* This patch ensures that an upgrade script has been executed. Upgrade scripts
* This patch ensures that an upgrade script has been executed. Upgrade scripts
* should create an entry for the patch with the required ID and execution status
* so that the code in this class is never called. If called, an exception message
* is always generated.
@@ -31,26 +31,37 @@ public class SchemaUpgradeScriptPatch extends AbstractPatch
{
private static final String MSG_NOT_EXECUTED = "patch.schemaUpgradeScript.err.not_executed";
private String scriptName;
private String scriptUrl;
public SchemaUpgradeScriptPatch()
{
}
/**
* @return Returns the URL of the script that has to have been run
*/
public String getScriptUrl()
{
return scriptUrl;
}
/**
* Set the name of the upgrade script to execute.
* Set the URL of the upgrade script to execute. This is the full URL of the
* file, e.g. <b>classpath:alfresco/patch/scripts/upgrade-1.4/${hibernate.dialect.class}/patchAlfrescoSchemaUpdate-1.4-2.sql</b>
* where the <b>${hibernate.dialect.class}</b> placeholder will be substituted with the Hibernate
* <code>Dialect</code> as configured for the system.
*
* @param scriptName the script filename
* @param scriptUrl the script URL, including the dialect placeholder
*/
public void setScriptName(String scriptName)
public void setScriptUrl(String script)
{
this.scriptName = scriptName;
this.scriptUrl = script;
}
protected void checkProperties()
{
super.checkProperties();
checkPropertyNotNull(scriptName, "scriptName");
checkPropertyNotNull(scriptUrl, "scriptUrl");
}
/**
@@ -59,6 +70,6 @@ public class SchemaUpgradeScriptPatch extends AbstractPatch
@Override
protected String applyInternal() throws Exception
{
throw new PatchException(MSG_NOT_EXECUTED, scriptName);
throw new PatchException(MSG_NOT_EXECUTED, scriptUrl);
}
}

View File

@@ -41,9 +41,9 @@ public interface NodeStatus
public void setNode(Node node);
public String getChangeTxnId();
public Transaction getTransaction();
public void setChangeTxnId(String txnId);
public void setTransaction(Transaction transaction);
public boolean isDeleted();
}

View File

@@ -0,0 +1,33 @@
/*
* Copyright (C) 2006 Alfresco, Inc.
*
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.repo.domain;
/**
* Interface for persistent <b>server</b> objects. These persist
* details of the servers that have committed transactions to the
* database, for instance.
*
* @author Derek Hulley
*/
public interface Server
{
public Long getId();
public String getIpAddress();
public void setIpAddress(String ipAddress);
}

View File

@@ -0,0 +1,35 @@
/*
* Copyright (C) 2006 Alfresco, Inc.
*
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.repo.domain;
/**
* Interface for persistent <b>transaction</b> objects.
*
* @author Derek Hulley
*/
public interface Transaction
{
public Long getId();
public String getChangeTxnId();
public void setChangeTxnId(String changeTxnId);
public Server getServer();
public void setServer(Server server);
}

View File

@@ -31,8 +31,10 @@ import org.alfresco.repo.domain.Node;
import org.alfresco.repo.domain.NodeKey;
import org.alfresco.repo.domain.NodeStatus;
import org.alfresco.repo.domain.PropertyValue;
import org.alfresco.repo.domain.Server;
import org.alfresco.repo.domain.Store;
import org.alfresco.repo.domain.StoreKey;
import org.alfresco.repo.domain.Transaction;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.service.cmr.dictionary.DataTypeDefinition;
import org.alfresco.service.cmr.repository.StoreRef;
@@ -53,8 +55,11 @@ import org.hibernate.exception.ConstraintViolationException;
public class HibernateNodeTest extends BaseSpringTest
{
private static final String TEST_NAMESPACE = "http://www.alfresco.org/test/HibernateNodeTest";
private static int i = 0;
private Store store;
private Server server;
private Transaction transaction;
public HibernateNodeTest()
{
@@ -68,6 +73,18 @@ public class HibernateNodeTest extends BaseSpringTest
store.setKey(storeKey);
// persist so that it is present in the hibernate cache
getSession().save(store);
server = (Server) getSession().get(ServerImpl.class, new Long(1));
if (server == null)
{
server = new ServerImpl();
server.setIpAddress("" + "i_" + System.currentTimeMillis());
getSession().save(server);
}
transaction = new TransactionImpl();
transaction.setServer(server);
transaction.setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
getSession().save(transaction);
}
protected void onTearDownInTransaction()
@@ -108,7 +125,7 @@ public class HibernateNodeTest extends BaseSpringTest
// create the node status
NodeStatus nodeStatus = new NodeStatusImpl();
nodeStatus.setKey(key);
nodeStatus.setChangeTxnId("txn:123");
nodeStatus.setTransaction(transaction);
getSession().save(nodeStatus);
// create a new Node
@@ -131,7 +148,7 @@ public class HibernateNodeTest extends BaseSpringTest
node = nodeStatus.getNode();
assertNotNull("Node was not attached to status", node);
// change the values
nodeStatus.setChangeTxnId("txn:456");
transaction.setChangeTxnId("txn:456");
// delete the node
getSession().delete(node);
@@ -351,7 +368,7 @@ public class HibernateNodeTest extends BaseSpringTest
NodeStatus containerNodeStatus = new NodeStatusImpl();
containerNodeStatus.setKey(containerNodeKey);
containerNodeStatus.setNode(containerNode);
containerNodeStatus.setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
containerNodeStatus.setTransaction(transaction);
getSession().save(containerNodeStatus);
// make content node 1
Node contentNode1 = new NodeImpl();
@@ -366,7 +383,7 @@ public class HibernateNodeTest extends BaseSpringTest
NodeStatus contentNodeStatus1 = new NodeStatusImpl();
contentNodeStatus1.setKey(contentNodeKey1);
contentNodeStatus1.setNode(contentNode1);
contentNodeStatus1.setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
contentNodeStatus1.setTransaction(transaction);
getSession().save(contentNodeStatus1);
// make content node 2
Node contentNode2 = new NodeImpl();
@@ -381,7 +398,7 @@ public class HibernateNodeTest extends BaseSpringTest
NodeStatus contentNodeStatus2 = new NodeStatusImpl();
contentNodeStatus2.setKey(contentNodeKey2);
contentNodeStatus2.setNode(contentNode2);
contentNodeStatus2.setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
contentNodeStatus2.setTransaction(transaction);
getSession().save(contentNodeStatus2);
// create an association to content 1
ChildAssoc assoc1 = new ChildAssocImpl();

View File

@@ -113,6 +113,16 @@
<key-property name="identifier" length="100" />
<key-property name="guid" length="36" />
</composite-id>
<!-- forward assoc to transaction -->
<many-to-one
name="transaction"
class="org.alfresco.repo.domain.hibernate.TransactionImpl"
column="transaction_id"
lazy="proxy"
fetch="select"
unique="false"
not-null="true"
cascade="none" />
<!-- forward assoc to node (optional) -->
<many-to-one
name="node"
@@ -122,7 +132,6 @@
fetch="join"
unique="false"
not-null="false" />
<property name="changeTxnId" column="change_txn_id" type="string" length="56" not-null="true" index="CHANGE_TXN_ID"/>
</class>
<class
@@ -137,33 +146,31 @@
<id name="id" column="id" type="long" >
<generator class="native" />
</id>
<natural-id mutable="true">
<!-- forward assoc to parent node -->
<many-to-one
name="parent"
class="org.alfresco.repo.domain.hibernate.NodeImpl"
lazy="proxy"
fetch="select"
optimistic-lock="true"
not-null="true"
unique-key="UIDX_CHILD_NAME" >
<column name="parent_node_id" />
</many-to-one>
<!-- forward assoc to child node -->
<many-to-one
name="child"
lazy="proxy"
fetch="select"
class="org.alfresco.repo.domain.hibernate.NodeImpl"
optimistic-lock="true"
not-null="true" >
<column name="child_node_id" />
</many-to-one>
<property name="typeQName" column="type_qname" type="QName" length="255" not-null="true" unique-key="UIDX_CHILD_NAME" />
</natural-id>
<!-- forward assoc to parent node -->
<many-to-one
name="parent"
class="org.alfresco.repo.domain.hibernate.NodeImpl"
lazy="proxy"
fetch="select"
optimistic-lock="true"
not-null="true"
unique-key="UIDX_CHILD_NAME" >
<column name="parent_node_id" />
</many-to-one>
<!-- forward assoc to child node -->
<many-to-one
name="child"
lazy="proxy"
fetch="select"
class="org.alfresco.repo.domain.hibernate.NodeImpl"
optimistic-lock="true"
not-null="true" >
<column name="child_node_id" />
</many-to-one>
<property name="typeQName" column="type_qname" type="QName" length="255" not-null="true" unique-key="UIDX_CHILD_NAME" />
<property name="qname" column="qname" type="QName" length="255" not-null="true" />
<property name="childNodeName" column="child_node_name" type="string" length="50" not-null="true" unique-key="UIDX_CHILD_NAME" />
<property name="childNodeNameCrc" column="child_node_name_crc" type="long" not-null="true" unique-key="UIDX_CHILD_NAME" />
<property name="qname" column="qname" type="QName" length="255" not-null="true" />
<property name="isPrimary" column="is_primary" />
<property name="index" column="assoc_index" />
</class>
@@ -309,25 +316,26 @@
<query name="node.GetNextChangeTxnIds">
select distinct
status.changeTxnId
transaction.changeTxnId
from
org.alfresco.repo.domain.hibernate.NodeStatusImpl as status
org.alfresco.repo.domain.hibernate.TransactionImpl as transaction
where
status.changeTxnId > :currentTxnId
transaction.changeTxnId > :currentTxnId
order by
status.changeTxnId
transaction.changeTxnId
</query>
<query name="node.GetChangedNodeStatusesCount">
select
count(status.changeTxnId)
count(transaction.changeTxnId)
from
org.alfresco.repo.domain.hibernate.NodeStatusImpl as status
join status.transaction as transaction
where
status.key.protocol = :storeProtocol and
status.key.identifier = :storeIdentifier and
status.node.id is not null and
status.changeTxnId = :changeTxnId
transaction.changeTxnId = :changeTxnId
</query>
<query name="node.GetChangedNodeStatuses">
@@ -335,11 +343,12 @@
status
from
org.alfresco.repo.domain.hibernate.NodeStatusImpl as status
join status.transaction as transaction
where
status.key.protocol = :storeProtocol and
status.key.identifier = :storeIdentifier and
status.node.id is not null and
status.changeTxnId = :changeTxnId
transaction.changeTxnId = :changeTxnId
</query>
<query name="node.GetDeletedNodeStatuses">
@@ -347,11 +356,12 @@
status
from
org.alfresco.repo.domain.hibernate.NodeStatusImpl as status
join status.transaction as transaction
where
status.key.protocol = :storeProtocol and
status.key.identifier = :storeIdentifier and
status.node.id is null and
status.changeTxnId = :changeTxnId
transaction.changeTxnId = :changeTxnId
</query>
<query name="node.GetContentDataStrings">

View File

@@ -21,6 +21,7 @@ import java.io.Serializable;
import org.alfresco.repo.domain.Node;
import org.alfresco.repo.domain.NodeKey;
import org.alfresco.repo.domain.NodeStatus;
import org.alfresco.repo.domain.Transaction;
import org.alfresco.util.EqualsHelper;
/**
@@ -34,15 +35,16 @@ public class NodeStatusImpl implements NodeStatus, Serializable
private NodeKey key;
private Node node;
private String changeTxnId;
private Transaction transaction;
@Override
public String toString()
{
StringBuilder sb = new StringBuilder(50);
sb.append("NodeStatus")
.append("[key=").append(key)
.append(", node=").append(node == null ? null : node.getNodeRef())
.append(", txn=").append(changeTxnId)
.append(", txn=").append(transaction)
.append("]");
return sb.toString();
}
@@ -85,14 +87,14 @@ public class NodeStatusImpl implements NodeStatus, Serializable
this.node = node;
}
public String getChangeTxnId()
public Transaction getTransaction()
{
return changeTxnId;
return transaction;
}
public void setChangeTxnId(String txnId)
public void setTransaction(Transaction transaction)
{
this.changeTxnId = txnId;
this.transaction = transaction;
}
public boolean isDeleted()

View File

@@ -0,0 +1,76 @@
/*
* Copyright (C) 2005 Alfresco, Inc.
*
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.repo.domain.hibernate;
import java.io.Serializable;
import org.alfresco.repo.domain.Server;
/**
 * Bean containing all the persistence data representing a <b>Server</b>.
 * <p>
 * This is the Hibernate-specific implementation of the
 * {@link org.alfresco.repo.domain.Server Server} interface.
 *
 * @author Derek Hulley
 */
public class ServerImpl extends LifecycleAdapter implements Server, Serializable
{
    private static final long serialVersionUID = 8063452519040344479L;

    // auto-generated primary key
    private Long id;
    // IP address identifying the server
    private String ipAddress;

    public ServerImpl()
    {
    }

    @Override
    public String toString()
    {
        // e.g. Server[id=1, ipAddress=192.168.0.1]
        return new StringBuilder(50)
                .append("Server")
                .append("[id=").append(id)
                .append(", ipAddress=").append(ipAddress)
                .append("]")
                .toString();
    }

    public Long getId()
    {
        return id;
    }

    /**
     * For Hibernate use
     */
    @SuppressWarnings("unused")
    private void setId(Long id)
    {
        this.id = id;
    }

    public String getIpAddress()
    {
        return ipAddress;
    }

    public void setIpAddress(String ipAddress)
    {
        this.ipAddress = ipAddress;
    }
}

View File

@@ -0,0 +1,62 @@
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE hibernate-mapping PUBLIC
   '-//Hibernate/Hibernate Mapping DTD 3.0//EN'
   'http://hibernate.sourceforge.net/hibernate-mapping-3.0.dtd'>

<!--
   Mappings for the Transaction and Server entities.  A Transaction records a
   single change transaction committed to the repository; each one optionally
   references the Server on which it was committed.
-->
<hibernate-mapping>

   <!-- Change transactions: one row per committed change transaction -->
   <class
         name="org.alfresco.repo.domain.hibernate.TransactionImpl"
         proxy="org.alfresco.repo.domain.Transaction"
         table="alf_transaction"
         dynamic-update="false"
         dynamic-insert="false"
         select-before-update="false"
         lazy="true"
         optimistic-lock="version" >
      <!-- auto-generated ID -->
      <id name="id" column="id" type="long" >
         <generator class="native" />
      </id>
      <!-- forward assoc to server IP; optional, so transactions can be
           recorded even when the committing server is unknown -->
      <many-to-one
            name="server"
            class="org.alfresco.repo.domain.hibernate.ServerImpl"
            column="server_id"
            lazy="proxy"
            fetch="select"
            unique="false"
            not-null="false"
            cascade="none" />
      <!-- indexed so that change transactions can be looked up by ID -->
      <property name="changeTxnId" column="change_txn_id" type="string" length="56" not-null="true" index="CHANGE_TXN_ID"/>
   </class>

   <!-- Servers that have committed transactions, keyed by IP address -->
   <class
         name="org.alfresco.repo.domain.hibernate.ServerImpl"
         proxy="org.alfresco.repo.domain.Server"
         table="alf_server"
         dynamic-update="false"
         dynamic-insert="false"
         select-before-update="false"
         lazy="true"
         optimistic-lock="version" >
      <!-- auto-generated ID -->
      <id name="id" column="id" type="long" >
         <generator class="native" />
      </id>
      <!-- the IP address is the natural key; length 15 fits a dotted-quad
           IPv4 address (presumably IPv6 is not expected here) -->
      <natural-id>
         <property name="ipAddress" column="ip_address" type="string" length="15" not-null="true" />
      </natural-id>
   </class>

   <!-- Look up a server entity by its unique IP address -->
   <query name="server.getServerByIpAddress">
      select
         server
      from
         org.alfresco.repo.domain.hibernate.ServerImpl as server
      where
         server.ipAddress = :ipAddress
   </query>

</hibernate-mapping>

View File

@@ -0,0 +1,88 @@
/*
* Copyright (C) 2005 Alfresco, Inc.
*
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.repo.domain.hibernate;
import java.io.Serializable;
import org.alfresco.repo.domain.Server;
import org.alfresco.repo.domain.Transaction;
/**
 * Bean containing all the persistence data representing a <b>Transaction</b>.
 * <p>
 * This is the Hibernate-specific implementation of the
 * {@link org.alfresco.repo.domain.Transaction Transaction} interface.
 *
 * @author Derek Hulley
 */
public class TransactionImpl extends LifecycleAdapter implements Transaction, Serializable
{
    private static final long serialVersionUID = -8264339795578077552L;

    // auto-generated primary key
    private Long id;
    // application-assigned change transaction ID
    private String changeTxnId;
    // server on which the transaction was committed, if recorded
    private Server server;

    public TransactionImpl()
    {
    }

    @Override
    public String toString()
    {
        // e.g. Transaction[id=1, changeTxnId=txn:123]
        return new StringBuilder(50)
                .append("Transaction")
                .append("[id=").append(id)
                .append(", changeTxnId=").append(changeTxnId)
                .append("]")
                .toString();
    }

    public Long getId()
    {
        return id;
    }

    /**
     * For Hibernate use
     */
    @SuppressWarnings("unused")
    private void setId(Long id)
    {
        this.id = id;
    }

    public String getChangeTxnId()
    {
        return changeTxnId;
    }

    public void setChangeTxnId(String changeTxnId)
    {
        this.changeTxnId = changeTxnId;
    }

    public Server getServer()
    {
        return server;
    }

    public void setServer(Server server)
    {
        this.server = server;
    }
}

View File

@@ -0,0 +1,529 @@
/*
* Copyright (C) 2006 Alfresco, Inc.
*
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.repo.domain.schema;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.InputStreamReader;
import java.io.Writer;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import org.alfresco.error.AlfrescoRuntimeException;
import org.alfresco.i18n.I18NUtil;
import org.alfresco.repo.admin.patch.impl.SchemaUpgradeScriptPatch;
import org.alfresco.util.TempFileProvider;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.Transaction;
import org.hibernate.cfg.Configuration;
import org.hibernate.dialect.Dialect;
import org.hibernate.tool.hbm2ddl.DatabaseMetadata;
import org.hibernate.tool.hbm2ddl.SchemaExport;
import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationEvent;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextRefreshedEvent;
import org.springframework.orm.hibernate3.LocalSessionFactoryBean;
import org.springframework.util.ResourceUtils;
/**
* Bootstraps the schema and schema update. The schema is considered missing if the applied patch table
* is not present, and the schema is considered empty if the applied patch table is empty.
*
* @author Derek Hulley
*/
public class SchemaBootstrap implements ApplicationListener
{
/** The placeholder for the configured <code>Dialect</code> class name: <b>${db.script.dialect}</b> */
private static final String PLACEHOLDER_SCRIPT_DIALECT = "\\$\\{db\\.script\\.dialect\\}";
private static final String MSG_EXECUTING_SCRIPT = "schema.update.msg.executing_script";
private static final String ERR_UPDATE_FAILED = "schema.update.err.update_failed";
private static final String ERR_VALIDATION_FAILED = "schema.update.err.validation_failed";
private static final String ERR_SCRIPT_NOT_RUN = "schema.update.err.update_script_not_run";
private static final String ERR_SCRIPT_NOT_FOUND = "schema.update.err.script_not_found";
private static final String ERR_STATEMENT_TERMINATOR = "schema.update.err.statement_terminator";
private static Log logger = LogFactory.getLog(SchemaBootstrap.class);
private LocalSessionFactoryBean localSessionFactory;
private String schemaOuputFilename;
private boolean updateSchema;
private List<String> postCreateScriptUrls;
private List<SchemaUpgradeScriptPatch> validateUpdateScriptPatches;
private List<SchemaUpgradeScriptPatch> applyUpdateScriptPatches;
public SchemaBootstrap()
{
postCreateScriptUrls = new ArrayList<String>(1);
validateUpdateScriptPatches = new ArrayList<SchemaUpgradeScriptPatch>(4);
applyUpdateScriptPatches = new ArrayList<SchemaUpgradeScriptPatch>(4);
}
public void setLocalSessionFactory(LocalSessionFactoryBean localSessionFactory) throws BeansException
{
this.localSessionFactory = localSessionFactory;
}
/**
* Set this to output the full database creation script
*
* @param schemaOuputFilename the name of a file to dump the schema to, or null to ignore
*/
public void setSchemaOuputFilename(String schemaOuputFilename)
{
this.schemaOuputFilename = schemaOuputFilename;
}
/**
* Set whether to modify the schema or not. Either way, the schema will be validated.
*
* @param updateSchema true to update and validate the schema, otherwise false to just
* validate the schema. Default is <b>true</b>.
*/
public void setUpdateSchema(boolean updateSchema)
{
this.updateSchema = updateSchema;
}
/**
* Set the scripts that must be executed after the schema has been created.
*
* @param postCreateScriptUrls file URLs
*
* @see #PLACEHOLDER_SCRIPT_DIALECT
*/
public void setPostCreateScriptUrls(List<String> postUpdateScriptUrls)
{
this.postCreateScriptUrls = postUpdateScriptUrls;
}
/**
* Set the schema script patches that must have been applied. These will not be
* applied to the database. These can be used where the script <u>cannot</u> be
* applied automatically or where a particular upgrade path is no longer supported.
* For example, at version 3.0, the upgrade scripts for version 1.4 may be considered
* unsupported - this doesn't prevent the manual application of the scripts, though.
*
* @param applyUpdateScriptPatches a list of schema patches to check
*/
public void setValidateUpdateScriptPatches(List<SchemaUpgradeScriptPatch> scriptPatches)
{
this.validateUpdateScriptPatches = scriptPatches;
}
/**
* Set the schema script patches that may be executed during an update.
*
* @param applyUpdateScriptPatches a list of schema patches to check
*/
public void setApplyUpdateScriptPatches(List<SchemaUpgradeScriptPatch> scriptPatches)
{
this.applyUpdateScriptPatches = scriptPatches;
}
public void onApplicationEvent(ApplicationEvent event)
{
if (!(event instanceof ContextRefreshedEvent))
{
// only work on startup
return;
}
// do everything in a transaction
Session session = getLocalSessionFactory().openSession();
Transaction transaction = session.beginTransaction();
try
{
// make sure that we don't autocommit
Connection connection = session.connection();
connection.setAutoCommit(false);
Configuration cfg = localSessionFactory.getConfiguration();
// dump the schema, if required
if (schemaOuputFilename != null)
{
File schemaOutputFile = new File(schemaOuputFilename);
dumpSchemaCreate(cfg, schemaOutputFile);
}
// update the schema, if required
if (updateSchema)
{
updateSchema(cfg, session, connection);
}
// verify that all patches have been applied correctly
checkSchemaPatchScripts(cfg, session, connection, validateUpdateScriptPatches, false); // check scripts
checkSchemaPatchScripts(cfg, session, connection, applyUpdateScriptPatches, false); // check scripts
// all done successfully
transaction.commit();
}
catch (Throwable e)
{
try { transaction.rollback(); } catch (Throwable ee) {}
if (updateSchema)
{
throw new AlfrescoRuntimeException(ERR_UPDATE_FAILED, e);
}
else
{
throw new AlfrescoRuntimeException(ERR_VALIDATION_FAILED, e);
}
}
}
private void dumpSchemaCreate(Configuration cfg, File schemaOutputFile)
{
// if the file exists, delete it
if (schemaOutputFile.exists())
{
schemaOutputFile.delete();
}
SchemaExport schemaExport = new SchemaExport(cfg)
.setFormat(true)
.setHaltOnError(true)
.setOutputFile(schemaOutputFile.getAbsolutePath())
.setDelimiter(";");
schemaExport.execute(false, false, false, true);
}
private SessionFactory getLocalSessionFactory()
{
return (SessionFactory) localSessionFactory.getObject();
}
/**
* @return Returns the number of applied patches
*/
private int countAppliedPatches(Connection connection) throws Exception
{
Statement stmt = connection.createStatement();
try
{
ResultSet rs = stmt.executeQuery("select count(id) from alf_applied_patch");
rs.next();
int count = rs.getInt(1);
return count;
}
catch (Throwable e)
{
// we'll try another table name
}
finally
{
try { stmt.close(); } catch (Throwable e) {}
}
// for pre-1.4 databases, the table was named differently
stmt = connection.createStatement();
try
{
ResultSet rs = stmt.executeQuery("select count(id) from applied_patch");
rs.next();
int count = rs.getInt(1);
return count;
}
finally
{
try { stmt.close(); } catch (Throwable e) {}
}
}
/**
* @return Returns the number of applied patches
*/
private boolean didPatchSucceed(Connection connection, String patchId) throws Exception
{
Statement stmt = connection.createStatement();
try
{
ResultSet rs = stmt.executeQuery("select succeeded from alf_applied_patch where id = '" + patchId + "'");
if (!rs.next())
{
return false;
}
boolean succeeded = rs.getBoolean(1);
return succeeded;
}
catch (Throwable e)
{
// we'll try another table name
}
finally
{
try { stmt.close(); } catch (Throwable e) {}
}
// for pre-1.4 databases, the table was named differently
stmt = connection.createStatement();
try
{
ResultSet rs = stmt.executeQuery("select succeeded from applied_patch where id = '" + patchId + "'");
if (!rs.next())
{
return false;
}
boolean succeeded = rs.getBoolean(1);
return succeeded;
}
finally
{
try { stmt.close(); } catch (Throwable e) {}
}
}
/**
* Builds the schema from scratch or applies the necessary patches to the schema.
*/
private void updateSchema(Configuration cfg, Session session, Connection connection) throws Exception
{
boolean create = false;
try
{
countAppliedPatches(connection);
}
catch (Throwable e)
{
create = true;
}
if (create)
{
// the applied patch table is missing - we assume that all other tables are missing
// perform a full update using Hibernate-generated statements
File tempFile = TempFileProvider.createTempFile("AlfrescoSchemaCreate", ".sql");
dumpSchemaCreate(cfg, tempFile);
executeScriptFile(cfg, connection, tempFile);
// execute post-create scripts (not patches)
for (String scriptUrl : this.postCreateScriptUrls)
{
executeScriptUrl(cfg, connection, scriptUrl);
}
}
else
{
// we have a database, so just run the update scripts
checkSchemaPatchScripts(cfg, session, connection, validateUpdateScriptPatches, false); // check for scripts that must have been run
checkSchemaPatchScripts(cfg, session, connection, applyUpdateScriptPatches, true); // execute scripts as required
// let Hibernate do any required updates
File tempFile = null;
Writer writer = null;
try
{
final Dialect dialect = Dialect.getDialect(cfg.getProperties());
DatabaseMetadata metadata = new DatabaseMetadata(connection, dialect);
String[] sqls = cfg.generateSchemaUpdateScript(dialect, metadata);
if (sqls.length > 0)
{
tempFile = TempFileProvider.createTempFile("AlfrescoSchemaUpdate", ".sql");
writer = new BufferedWriter(new FileWriter(tempFile));
for (String sql : sqls)
{
writer.append(sql);
writer.append(";\n");
}
}
}
finally
{
if (writer != null)
{
try {writer.close();} catch (Throwable e) {}
}
}
// execute if there were changes raised by Hibernate
if (tempFile != null)
{
executeScriptFile(cfg, connection, tempFile);
}
}
}
/**
* Check that the necessary scripts have been executed against the database
*/
private void checkSchemaPatchScripts(
Configuration cfg,
Session session,
Connection connection,
List<SchemaUpgradeScriptPatch> scriptPatches,
boolean apply) throws Exception
{
// first check if there have been any applied patches
int appliedPatchCount = countAppliedPatches(connection);
if (appliedPatchCount == 0)
{
// This is a new schema, so upgrade scripts are irrelevant
// and patches will not have been applied yet
return;
}
for (SchemaUpgradeScriptPatch patch : scriptPatches)
{
final String patchId = patch.getId();
final String scriptUrl = patch.getScriptUrl();
// check if the script was successfully executed
boolean wasSuccessfullyApplied = didPatchSucceed(connection, patchId);
if (wasSuccessfullyApplied)
{
// nothing to do - it has been done before
continue;
}
else if (!apply)
{
// the script was not run and may not be run automatically
throw AlfrescoRuntimeException.create(ERR_SCRIPT_NOT_RUN, scriptUrl);
}
// it wasn't run and it can be run now
executeScriptUrl(cfg, connection, scriptUrl);
}
}
private void executeScriptUrl(Configuration cfg, Connection connection, String scriptUrl) throws Exception
{
Dialect dialect = Dialect.getDialect(cfg.getProperties());
File scriptFile = getScriptFile(dialect.getClass(), scriptUrl);
// check that it exists
if (scriptFile == null)
{
throw AlfrescoRuntimeException.create(ERR_SCRIPT_NOT_FOUND, scriptUrl);
}
// now execute it
executeScriptFile(cfg, connection, scriptFile);
}
/**
* Replaces the dialect placeholder in the script URL and attempts to find a file for
* it. If not found, the dialect hierarchy will be walked until a compatible script is
* found. This makes it possible to have scripts that are generic to all dialects.
*
* @return Returns the file if found, otherwise null
*/
private File getScriptFile(Class dialectClazz, String scriptUrl) throws Exception
{
// replace the dialect placeholder
String dialectScriptUrl = scriptUrl.replaceAll(PLACEHOLDER_SCRIPT_DIALECT, dialectClazz.getName());
// get a handle on the resource
try
{
File scriptFile = ResourceUtils.getFile(dialectScriptUrl);
if (scriptFile.exists())
{
// found a compatible dialect version
return scriptFile;
}
}
catch (FileNotFoundException e)
{
// doesn't exist
}
// it wasn't found. Get the superclass of the dialect and try again
Class superClazz = dialectClazz.getSuperclass();
if (Dialect.class.isAssignableFrom(superClazz))
{
// we still have a Dialect - try again
return getScriptFile(superClazz, scriptUrl);
}
else
{
// we have exhausted all options
return null;
}
}
/**
 * Reads the given script file line by line, accumulating each semicolon-terminated
 * statement and executing it against the connection.  Blank lines and lines starting
 * with <tt>--</tt>, <tt>//</tt> or <tt>/*</tt> are treated as comments; a comment may
 * not interrupt a multi-line statement.
 *
 * @param cfg the Hibernate configuration (reserved for dialect-specific handling)
 * @param connection the connection on which to execute each statement
 * @param scriptFile the UTF-8 encoded SQL script to execute
 * @throws Exception if a statement is unterminated or fails to execute
 */
private void executeScriptFile(Configuration cfg, Connection connection, File scriptFile) throws Exception
{
    logger.info(I18NUtil.getMessage(MSG_EXECUTING_SCRIPT, scriptFile));
    
    BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(scriptFile), "UTF-8"));
    try
    {
        int line = 0;
        // loop through all statements
        StringBuilder sb = new StringBuilder(1024);
        while(true)
        {
            String sql = reader.readLine();
            line++;
            if (sql == null)
            {
                // nothing left in the file
                if (sb.length() > 0)
                {
                    // the file ended part-way through a statement - don't drop it silently
                    throw AlfrescoRuntimeException.create(ERR_STATEMENT_TERMINATOR, (line - 1), scriptFile);
                }
                break;
            }
            // trim it
            sql = sql.trim();
            if (sql.length() == 0 ||
                sql.startsWith( "--" ) ||
                sql.startsWith( "//" ) ||
                sql.startsWith( "/*" ) )
            {
                if (sb.length() > 0)
                {
                    // we have an unterminated statement
                    throw AlfrescoRuntimeException.create(ERR_STATEMENT_TERMINATOR, (line - 1), scriptFile);
                }
                // there has not been anything to execute - it's just a comment line
                continue;
            }
            // have we reached the end of a statement?
            boolean execute = false;
            if (sql.endsWith(";"))
            {
                // strip the terminator before accumulating
                sql = sql.substring(0, sql.length() - 1);
                execute = true;
            }
            // append to the statement being built up
            sb.append(" ").append(sql);
            // execute, if required
            if (execute)
            {
                Statement stmt = connection.createStatement();
                try
                {
                    sql = sb.toString();
                    if (logger.isDebugEnabled())
                    {
                        logger.debug("Executing statement: " + sql);
                    }
                    stmt.execute(sql);
                    // reset the accumulator for the next statement
                    sb = new StringBuilder(1024);
                }
                finally
                {
                    // best-effort close; a close failure must not mask an execution error
                    try { stmt.close(); } catch (Throwable e) {}
                }
            }
        }
    }
    finally
    {
        // best-effort close of the script reader
        try { reader.close(); } catch (Throwable e) {}
    }
}
}

View File

@@ -1630,10 +1630,7 @@ public abstract class BaseNodeServiceTest extends BaseSpringTest
NodeRef defRef = pathDefRef.getChildRef();
// now browse down using the node service
NodeRef checkParentRef = nodeService.getChildByName(rootNodeRef, ASSOC_TYPE_QNAME_TEST_CHILDREN, parentRef.getId());
assertNotNull("First level, non-named node not found", checkParentRef);
assertEquals(parentRef, checkParentRef);
NodeRef checkAbcRef = nodeService.getChildByName(checkParentRef, ASSOC_TYPE_QNAME_TEST_CONTAINS, "abc");
NodeRef checkAbcRef = nodeService.getChildByName(parentRef, ASSOC_TYPE_QNAME_TEST_CONTAINS, "abc");
assertNotNull("Second level, named node 'ABC' not found", checkAbcRef);
assertEquals(abcRef, checkAbcRef);
NodeRef checkDefRef = nodeService.getChildByName(checkAbcRef, ASSOC_TYPE_QNAME_TEST_CONTAINS, "def");

View File

@@ -148,7 +148,7 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl
else
{
return new NodeRef.Status(
nodeStatus.getChangeTxnId(),
nodeStatus.getTransaction().getChangeTxnId(),
nodeStatus.isDeleted());
}
}
@@ -1446,11 +1446,11 @@ public class DbNodeServiceImpl extends AbstractNodeServiceImpl
// update old status
NodeStatus oldNodeStatus = nodeDaoService.getNodeStatus(oldNodeRef, true);
oldNodeStatus.setNode(null);
oldNodeStatus.setChangeTxnId(txnId);
oldNodeStatus.getTransaction().setChangeTxnId(txnId);
// create the new status
NodeStatus newNodeStatus = nodeDaoService.getNodeStatus(newNodeRef, true);
newNodeStatus.setNode(nodeToMove);
newNodeStatus.setChangeTxnId(txnId);
newNodeStatus.getTransaction().setChangeTxnId(txnId);
}
}

View File

@@ -16,9 +16,14 @@
*/
package org.alfresco.repo.node.db.hibernate;
import java.io.Serializable;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import java.util.zip.CRC32;
import org.alfresco.error.AlfrescoRuntimeException;
@@ -28,13 +33,17 @@ import org.alfresco.repo.domain.Node;
import org.alfresco.repo.domain.NodeAssoc;
import org.alfresco.repo.domain.NodeKey;
import org.alfresco.repo.domain.NodeStatus;
import org.alfresco.repo.domain.Server;
import org.alfresco.repo.domain.Store;
import org.alfresco.repo.domain.StoreKey;
import org.alfresco.repo.domain.Transaction;
import org.alfresco.repo.domain.hibernate.ChildAssocImpl;
import org.alfresco.repo.domain.hibernate.NodeAssocImpl;
import org.alfresco.repo.domain.hibernate.NodeImpl;
import org.alfresco.repo.domain.hibernate.NodeStatusImpl;
import org.alfresco.repo.domain.hibernate.ServerImpl;
import org.alfresco.repo.domain.hibernate.StoreImpl;
import org.alfresco.repo.domain.hibernate.TransactionImpl;
import org.alfresco.repo.node.db.NodeDaoService;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.TransactionalDao;
@@ -71,9 +80,14 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
private static final String QUERY_GET_TARGET_ASSOCS = "node.GetTargetAssocs";
private static final String QUERY_GET_SOURCE_ASSOCS = "node.GetSourceAssocs";
private static final String QUERY_GET_CONTENT_DATA_STRINGS = "node.GetContentDataStrings";
private static final String QUERY_GET_SERVER_BY_IPADDRESS = "server.getServerByIpAddress";
/** a uuid identifying this unique instance */
private String uuid;
private final String uuid;
private final ReadLock serverReadLock;
private final WriteLock serverWriteLock;
private Server server;
/**
*
@@ -81,6 +95,10 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
public HibernateNodeDaoServiceImpl()
{
this.uuid = GUID.generate();
ReentrantReadWriteLock serverReadWriteLock = new ReentrantReadWriteLock();
serverReadLock = serverReadWriteLock.readLock();
serverWriteLock = serverReadWriteLock.writeLock();
}
/**
@@ -108,6 +126,93 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
return uuid.hashCode();
}
/**
* Gets/creates the <b>server</b> instance to use for the life of this instance
*/
private Server getServer()
{
// get readlock
serverReadLock.lock();
try
{
if (server != null)
{
return server;
}
}
finally
{
serverReadLock.unlock();
}
// get the write lock
serverWriteLock.lock();
try
{
final String ipAddress = InetAddress.getLocalHost().getHostAddress();
HibernateCallback callback = new HibernateCallback()
{
public Object doInHibernate(Session session)
{
Query query = session
.getNamedQuery(HibernateNodeDaoServiceImpl.QUERY_GET_SERVER_BY_IPADDRESS)
.setString("ipAddress", ipAddress);
return query.uniqueResult();
}
};
server = (Server) getHibernateTemplate().execute(callback);
// create it if it doesn't exist
if (server == null)
{
server = new ServerImpl();
server.setIpAddress(ipAddress);
try
{
getSession().save(server);
}
catch (DataIntegrityViolationException e)
{
// get it again
server = (Server) getHibernateTemplate().execute(callback);
if (server == null)
{
throw new AlfrescoRuntimeException("Unable to create server instance: " + ipAddress);
}
}
}
return server;
}
catch (Exception e)
{
throw new AlfrescoRuntimeException("Failed to create server instance", e);
}
finally
{
serverWriteLock.unlock();
}
}
private static final String RESOURCE_KEY_TRANSACTION_ID = "hibernate.transaction.id";
private Transaction getCurrentTransaction()
{
Transaction transaction = null;
Serializable txnId = (Serializable) AlfrescoTransactionSupport.getResource(RESOURCE_KEY_TRANSACTION_ID);
if (txnId == null)
{
// no transaction instance has been bound to the transaction
transaction = new TransactionImpl();
transaction.setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
transaction.setServer(getServer());
txnId = getHibernateTemplate().save(transaction);
// bind the id
AlfrescoTransactionSupport.bindResource(RESOURCE_KEY_TRANSACTION_ID, txnId);
}
else
{
transaction = (Transaction) getHibernateTemplate().get(TransactionImpl.class, txnId);
}
return transaction;
}
/**
* Does this <tt>Session</tt> contain any changes which must be
* synchronized with the store?
@@ -218,7 +323,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
{
status = new NodeStatusImpl();
status.setKey(nodeKey);
status.setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
status.setTransaction(getCurrentTransaction());
getHibernateTemplate().save(status);
}
// done
@@ -237,7 +342,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
}
else
{
status.setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
status.getTransaction().setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
}
}
@@ -259,13 +364,13 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
// If that is the case, then the session has to be flushed so that the database
// constraints aren't violated as the node creation will write to the database to
// get an ID
if (status.getChangeTxnId().equals(AlfrescoTransactionSupport.getTransactionId()))
if (status.getTransaction().getChangeTxnId().equals(AlfrescoTransactionSupport.getTransactionId()))
{
// flush
getHibernateTemplate().flush();
}
}
// build a concrete node based on a bootstrap type
Node node = new NodeImpl();
// set other required properties
@@ -277,7 +382,11 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
// set required status properties
status.setNode(node);
status.setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
// assign a transaction
if (status.getTransaction() == null)
{
status.setTransaction(getCurrentTransaction());
}
// persist the nodestatus
getHibernateTemplate().save(status);
@@ -331,7 +440,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
NodeRef nodeRef = node.getNodeRef();
NodeStatus nodeStatus = getNodeStatus(nodeRef, true);
nodeStatus.setNode(null);
nodeStatus.setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
nodeStatus.getTransaction().setChangeTxnId(AlfrescoTransactionSupport.getTransactionId());
// finally delete the node
getHibernateTemplate().delete(node);
// flush to ensure constraints can't be violated
@@ -371,7 +480,7 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
{
/*
* This initial child association creation will fail IFF there is already
* an association of the given type between the two nodes. For new association
* an association of the given type and name between the two nodes. For new association
* creation, this can only occur if two transactions attempt to create a secondary
* child association between the same two nodes. As this is unlikely, it is
* appropriate to just throw a runtime exception and let the second transaction
@@ -383,28 +492,18 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
* if the association is recreated subsequently.
*/
String uuid = childNode.getUuid();
// assign a random name to the node
String randomName = GUID.generate();
ChildAssoc assoc = new ChildAssocImpl();
assoc.setTypeQName(assocTypeQName);
assoc.setChildNodeName(getShortName(uuid));
assoc.setChildNodeNameCrc(getCrc(uuid));
assoc.setChildNodeName(randomName);
assoc.setChildNodeNameCrc(-1L); // random names compete only with each other
assoc.setQname(qname);
assoc.setIsPrimary(isPrimary);
assoc.buildAssociation(parentNode, childNode);
// persist it, catching the duplicate child name
try
{
getHibernateTemplate().save(assoc);
}
catch (DataIntegrityViolationException e)
{
throw new AlfrescoRuntimeException("An association already exists between the two nodes: \n" +
" parent: " + parentNode.getId() + "\n" +
" child: " + childNode.getId() + "\n" +
" assoc: " + assocTypeQName,
e);
}
getHibernateTemplate().save(assoc);
// done
return assoc;
}
@@ -422,17 +521,22 @@ public class HibernateNodeDaoServiceImpl extends HibernateDaoSupport implements
*/
String childNameNew = null;
long crc = -1;
if (childName == null)
{
childNameNew = childAssoc.getChild().getUuid();
// random names compete only with each other, i.e. not at all
childNameNew = GUID.generate();
crc = -1;
}
else
{
// assigned names compete exactly
childNameNew = childName.toLowerCase();
crc = getCrc(childNameNew);
}
final String childNameNewShort = getShortName(childNameNew);
final long childNameNewCrc = getCrc(childNameNew);
final long childNameNewCrc = crc;
// check if the name has changed
if (childAssoc.getChildNodeNameCrc() == childNameNewCrc)

View File

@@ -62,13 +62,13 @@ import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
* database is static then the L2 cache usage can be set to use
* the <code>NORMAL</code> mode. <code>REFRESH</code> should be
* used where the server will still be accessed from some clients
* despite the database changing.
* despite the database changing. <code>NORMAL</code> can be used
* in the case of the caches being clustered, i.e. the caches will
* not be out of date w.r.t. the database.
* </li>
* <li>
* This process should not run continuously on a live
* server as it would be performing unecessary work.
* If it was left running, however, it would not
* lead to data corruption or such-like. Use the
* This process should only be used continuously where the index
* transactions are following the database transactions. Use the
* {@link #setRunContinuously(boolean) runContinuously} property
* to change this behaviour.
* </li>
@@ -91,7 +91,7 @@ public class FullIndexRecoveryComponent extends HibernateDaoSupport implements I
private static boolean started = false;
/** The current transaction ID being processed */
private static String currentTxnId = START_TXN_ID;
/** kept to notify the thread that it should quite */
/** kept to notify the thread that it should quit */
private boolean killThread = false;
/** provides transactions to atomically index each missed transaction */
@@ -104,8 +104,6 @@ public class FullIndexRecoveryComponent extends HibernateDaoSupport implements I
private SearchService searcher;
/** the component giving direct access to <b>node</b> instances */
private NodeService nodeService;
/** the stores to reindex */
private List<StoreRef> storeRefs;
/** set this to run the index recovery component */
private boolean executeFullRecovery;
/** set this on to keep checking for new transactions and never stop */
@@ -125,8 +123,6 @@ public class FullIndexRecoveryComponent extends HibernateDaoSupport implements I
public FullIndexRecoveryComponent()
{
this.storeRefs = new ArrayList<StoreRef>(2);
this.killThread = false;
this.executeFullRecovery = false;
this.runContinuously = false;
@@ -193,21 +189,6 @@ public class FullIndexRecoveryComponent extends HibernateDaoSupport implements I
this.nodeService = nodeService;
}
/**
* Set the stores that need reindexing
*
* @param storeRefStrings a list of strings representing store references
*/
public void setStores(List<String> storeRefStrings)
{
storeRefs.clear();
for (String storeRefStr : storeRefStrings)
{
StoreRef storeRef = new StoreRef(storeRefStr);
storeRefs.add(storeRef);
}
}
/**
* Set this to <code>true</code> to initiate the full index recovery.
* <p>
@@ -299,6 +280,7 @@ public class FullIndexRecoveryComponent extends HibernateDaoSupport implements I
{
public Object doWork()
{
List<StoreRef> storeRefs = nodeService.getStores();
// reindex each store
for (StoreRef storeRef : storeRefs)
{
@@ -352,8 +334,7 @@ public class FullIndexRecoveryComponent extends HibernateDaoSupport implements I
if (logger.isDebugEnabled())
{
logger.debug("Full index recovery thread started: \n" +
" continuous: " + runContinuously + "\n" +
" stores: " + storeRefs);
" continuous: " + runContinuously);
}
}
}
@@ -377,8 +358,8 @@ public class FullIndexRecoveryComponent extends HibernateDaoSupport implements I
// reindex nodes
List<String> txnsIndexed = FullIndexRecoveryComponent.this.reindexNodes();
// reindex missing content
@SuppressWarnings("unused")
int missingContentCount = FullIndexRecoveryComponent.this.reindexMissingContent();
// @SuppressWarnings("unused")
// int missingContentCount = FullIndexRecoveryComponent.this.reindexMissingContent();
// check if the process should terminate
if (txnsIndexed.size() == 0 && !runContinuously)
{
@@ -417,73 +398,6 @@ public class FullIndexRecoveryComponent extends HibernateDaoSupport implements I
}
}
/**
* @return Returns the number of documents reindexed
*/
private int reindexMissingContent()
{
int count = 0;
for (StoreRef storeRef : storeRefs)
{
count += reindexMissingContent(storeRef);
}
return count;
}
/**
* @param storeRef the store to check for missing content
* @return Returns the number of documents reindexed
*/
private int reindexMissingContent(StoreRef storeRef)
{
SearchParameters sp = new SearchParameters();
sp.addStore(storeRef);
// search for it in the index
String query = "TEXT:" + LuceneIndexerImpl.NOT_INDEXED_CONTENT_MISSING;
sp.setLanguage(SearchService.LANGUAGE_LUCENE);
sp.setQuery(query);
ResultSet results = null;
try
{
results = searcher.query(sp);
int count = 0;
// loop over the results and get the details of the nodes that have missing content
List<ChildAssociationRef> assocRefs = results.getChildAssocRefs();
for (ChildAssociationRef assocRef : assocRefs)
{
final NodeRef childNodeRef = assocRef.getChildRef();
// prompt for a reindex - it might fail again, but we just keep plugging away
TransactionWork<Object> reindexWork = new TransactionWork<Object>()
{
public Object doWork()
{
indexer.updateNode(childNodeRef);
return null;
}
};
TransactionUtil.executeInNonPropagatingUserTransaction(transactionService, reindexWork);
count++;
}
// done
if (logger.isDebugEnabled())
{
logger.debug("Reindexed missing content: \n" +
" store: " + storeRef + "\n" +
" node count: " + count);
}
return count;
}
finally
{
if (results != null)
{
results.close();
}
}
}
/**
* @return Returns the transaction ID just reindexed, i.e. where some work was performed
*/
@@ -572,16 +486,16 @@ public class FullIndexRecoveryComponent extends HibernateDaoSupport implements I
getSession().setCacheMode(l2CacheMode);
// reindex each store
for (StoreRef storeRef : storeRefs)
{
if (!nodeService.exists(storeRef))
{
// the store is not present
continue;
}
// reindex for store
reindexNodes(storeRef, changeTxnId);
}
// for (StoreRef storeRef : storeRefs)
// {
// if (!nodeService.exists(storeRef))
// {
// // the store is not present
// continue;
// }
// // reindex for store
// reindexNodes(storeRef, changeTxnId);
// }
// done
return null;
}
@@ -675,10 +589,10 @@ public class FullIndexRecoveryComponent extends HibernateDaoSupport implements I
};
/**
* Retrieve all transaction IDs that are greater than the given transaction ID.
* Retrieve next 50 transaction IDs that are greater than the given transaction ID.
*
* @param currentTxnId the transaction ID that must be less than all returned results
* @return Returns an ordered list of transaction IDs
* @return Returns an ordered list of the next 50 transaction IDs
*/
@SuppressWarnings("unchecked")
public List<String> getNextChangeTxnIds(final String currentTxnId)
@@ -689,6 +603,7 @@ public class FullIndexRecoveryComponent extends HibernateDaoSupport implements I
{
Query query = session.getNamedQuery(QUERY_GET_NEXT_CHANGE_TXN_IDS);
query.setString("currentTxnId", currentTxnId)
.setMaxResults(50)
.setReadOnly(true);
return query.list();
}

View File

@@ -123,7 +123,7 @@ public class FullIndexRecoveryComponentTest extends TestCase
String txnId = TransactionUtil.executeInNonPropagatingUserTransaction(txnService, dropNodeIndexWork);
indexRecoverer.setExecuteFullRecovery(true);
indexRecoverer.setStores(storeRefStrings);
// indexRecoverer.setStores(storeRefStrings);
// reindex
indexRecoverer.reindex();

View File

@@ -0,0 +1,741 @@
///*
// * Copyright (C) 2005-2006 Alfresco, Inc.
// *
// * Licensed under the Mozilla Public License version 1.1
// * with a permitted attribution clause. You may obtain a
// * copy of the License at
// *
// * http://www.alfresco.org/legal/license.txt
// *
// * Unless required by applicable law or agreed to in writing,
// * software distributed under the License is distributed on an
// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// * either express or implied. See the License for the specific
// * language governing permissions and limitations under the
// * License.
// */
//package org.alfresco.repo.node.index;
//
//import java.util.ArrayList;
//import java.util.List;
//
//import org.alfresco.error.AlfrescoRuntimeException;
//import org.alfresco.model.ContentModel;
//import org.alfresco.repo.domain.NodeStatus;
//import org.alfresco.repo.search.Indexer;
//import org.alfresco.repo.search.impl.lucene.LuceneIndexerImpl;
//import org.alfresco.repo.search.impl.lucene.fts.FullTextSearchIndexer;
//import org.alfresco.repo.transaction.TransactionUtil;
//import org.alfresco.repo.transaction.TransactionUtil.TransactionWork;
//import org.alfresco.service.cmr.repository.ChildAssociationRef;
//import org.alfresco.service.cmr.repository.NodeRef;
//import org.alfresco.service.cmr.repository.NodeService;
//import org.alfresco.service.cmr.repository.StoreRef;
//import org.alfresco.service.cmr.search.ResultSet;
//import org.alfresco.service.cmr.search.SearchParameters;
//import org.alfresco.service.cmr.search.SearchService;
//import org.alfresco.service.transaction.TransactionService;
//import org.apache.commons.logging.Log;
//import org.apache.commons.logging.LogFactory;
//import org.hibernate.CacheMode;
//import org.hibernate.Query;
//import org.hibernate.Session;
//import org.springframework.orm.hibernate3.HibernateCallback;
//import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
//
///**
// * Ensures that the FTS indexing picks up on any outstanding documents that
// * require indexing.
// * <p>
// * This component must be used as a singleton (one per VM) and may only be
// * called to reindex once. It will start a thread that processes all available
// * transactions and keeps checking to ensure that the index is up to date with
// * the latest database changes.
// * <p>
// * <b>The following points are important:</b>
// * <ul>
// * <li>
// * By default, the Hibernate L2 cache is used during processing.
// * This can be disabled by either disabling the L2 cache globally
// * for the server (not recommended) or by setting the
// * {@link #setL2CacheMode(String) l2CacheMode} property. If the
// * database is static then the L2 cache usage can be set to use
// * the <code>NORMAL</code> mode. <code>REFRESH</code> should be
// * used where the server will still be accessed from some clients
// * despite the database changing. <code>NORMAL</code> can be used
// * in the case of the caches being clustered, i.e. the caches will
// * not be out of date w.r.t. the database.
// * </li>
// * <li>
// * This process should only be used continuously where the index
// * transactions are following the database transactions. Use the
// * {@link #setRunContinuously(boolean) runContinuously} property
// * to change this behaviour.
// * </li>
// * </ul>
// *
// * @author Derek Hulley
// */
//public class MissingContentReindexComponent extends HibernateDaoSupport implements IndexRecovery
//{
// public static final String QUERY_GET_NEXT_CHANGE_TXN_IDS = "node.GetNextChangeTxnIds";
// public static final String QUERY_GET_CHANGED_NODE_STATUSES = "node.GetChangedNodeStatuses";
// public static final String QUERY_GET_DELETED_NODE_STATUSES = "node.GetDeletedNodeStatuses";
// public static final String QUERY_GET_CHANGED_NODE_STATUSES_COUNT = "node.GetChangedNodeStatusesCount";
//
// private static final String START_TXN_ID = "000";
//
// private static Log logger = LogFactory.getLog(FullIndexRecoveryComponent.class);
//
// /** ensures that this process is kicked off once per VM */
// private static boolean started = false;
// /** The current transaction ID being processed */
// private static String currentTxnId = START_TXN_ID;
// /** kept to notify the thread that it should quite */
// private boolean killThread = false;
//
// /** provides transactions to atomically index each missed transaction */
// private TransactionService transactionService;
// /** the component to index the node hierarchy */
// private Indexer indexer;
// /** the FTS indexer that we will prompt to pick up on any un-indexed text */
// private FullTextSearchIndexer ftsIndexer;
// /** the component providing searches of the indexed nodes */
// private SearchService searcher;
// /** the component giving direct access to <b>node</b> instances */
// private NodeService nodeService;
// /** set this to run the index recovery component */
// private boolean executeFullRecovery;
// /** set this on to keep checking for new transactions and never stop */
// private boolean runContinuously;
// /** set the time to wait between checking indexes */
// private long waitTime;
// /** controls how the L2 cache is used */
// private CacheMode l2CacheMode;
//
// /**
// * @return Returns the ID of the current (or last) transaction processed
// */
// public static String getCurrentTransactionId()
// {
// return currentTxnId;
// }
//
// public FullIndexRecoveryComponent()
// {
// this.killThread = false;
// this.executeFullRecovery = false;
// this.runContinuously = false;
// this.waitTime = 1000L;
// this.l2CacheMode = CacheMode.REFRESH;
//
// // ensure that we kill the thread when the VM is shutting down
// Runnable shutdownRunnable = new Runnable()
// {
// public void run()
// {
// killThread = true;
// };
// };
// Thread shutdownThread = new Thread(shutdownRunnable);
// Runtime.getRuntime().addShutdownHook(shutdownThread);
// }
//
// /**
// * @return Returns true if the component has already been started
// */
// public static boolean isStarted()
// {
// return started;
// }
//
// /**
// * @param transactionService provide transactions to index each missed transaction
// */
// public void setTransactionService(TransactionService transactionService)
// {
// this.transactionService = transactionService;
// }
//
// /**
// * @param indexer the indexer that will be index
// */
// public void setIndexer(Indexer indexer)
// {
// this.indexer = indexer;
// }
//
// /**
// * @param ftsIndexer the FTS background indexer
// */
// public void setFtsIndexer(FullTextSearchIndexer ftsIndexer)
// {
// this.ftsIndexer = ftsIndexer;
// }
//
// /**
// * @param searcher component providing index searches
// */
// public void setSearcher(SearchService searcher)
// {
// this.searcher = searcher;
// }
//
// /**
// * @param nodeService provides information about nodes for indexing
// */
// public void setNodeService(NodeService nodeService)
// {
// this.nodeService = nodeService;
// }
//
// /**
// * Set this to <code>true</code> to initiate the full index recovery.
// * <p>
// * This used to default to <code>true</code> but is now false. Set this
// * if the potentially long-running process of checking and fixing the
// * indexes must be started.
// *
// * @param executeFullRecovery
// */
// public void setExecuteFullRecovery(boolean executeFullRecovery)
// {
// this.executeFullRecovery = executeFullRecovery;
// }
//
// /**
// * Set this to ensure that the process continuously checks for new transactions.
// * If not, it will permanently terminate once it catches up with the current
// * transactions.
// *
// * @param runContinuously true to never cease looking for new transactions
// */
// public void setRunContinuously(boolean runContinuously)
// {
// this.runContinuously = runContinuously;
// }
//
// /**
// * Set the time to wait between checking for new transaction changes in the database.
// *
// * @param waitTime the time to wait in milliseconds
// */
// public void setWaitTime(long waitTime)
// {
// this.waitTime = waitTime;
// }
//
// /**
// * Set the hibernate cache mode by name
// *
// * @see org.hibernate.CacheMode
// */
// public void setL2CacheMode(String l2CacheModeStr)
// {
// if (l2CacheModeStr.equals("GET"))
// {
// l2CacheMode = CacheMode.GET;
// }
// else if (l2CacheModeStr.equals("IGNORE"))
// {
// l2CacheMode = CacheMode.IGNORE;
// }
// else if (l2CacheModeStr.equals("NORMAL"))
// {
// l2CacheMode = CacheMode.NORMAL;
// }
// else if (l2CacheModeStr.equals("PUT"))
// {
// l2CacheMode = CacheMode.PUT;
// }
// else if (l2CacheModeStr.equals("REFRESH"))
// {
// l2CacheMode = CacheMode.REFRESH;
// }
// else
// {
// throw new IllegalArgumentException("Unrecognised Hibernate L2 cache mode: " + l2CacheModeStr);
// }
// }
//
// /**
// * Ensure that the index is up to date with the current state of the persistence layer.
// * The full list of unique transaction change IDs is retrieved and used to detect
// * which are not present in the index. All the node changes and deletions for the
// * remaining transactions are then indexed.
// */
// public synchronized void reindex()
// {
// if (FullIndexRecoveryComponent.started)
// {
// throw new AlfrescoRuntimeException
// ("Only one FullIndexRecoveryComponent may be used per VM and it may only be called once");
// }
//
// // ensure that we don't redo this work
// FullIndexRecoveryComponent.started = true;
//
// // work to mark the stores for full text reindexing
// TransactionWork<Object> ftsReindexWork = new TransactionWork<Object>()
// {
// public Object doWork()
// {
// List<StoreRef> storeRefs = nodeService.getStores();
// // reindex each store
// for (StoreRef storeRef : storeRefs)
// {
// // check if the store exists
// if (!nodeService.exists(storeRef))
// {
// // store does not exist
// if (logger.isDebugEnabled())
// {
// logger.debug("Skipping reindex of non-existent store: " + storeRef);
// }
// continue;
// }
//
// // prompt FTS to reindex the store
// ftsIndexer.requiresIndex(storeRef);
// }
// // done
// if (logger.isDebugEnabled())
// {
// logger.debug("Prompted FTS index on stores: " + storeRefs);
// }
// return null;
// }
// };
// TransactionUtil.executeInNonPropagatingUserTransaction(transactionService, ftsReindexWork);
//
// // start full index recovery, if necessary
// if (!this.executeFullRecovery)
// {
// if (logger.isDebugEnabled())
// {
// logger.debug("Full index recovery is off - quitting");
// }
// }
// else
// {
// // set the state of the reindex
// FullIndexRecoveryComponent.currentTxnId = START_TXN_ID;
//
// // start a stateful thread that will begin processing the reindexing the transactions
// Runnable runnable = new ReindexRunner();
// Thread reindexThread = new Thread(runnable);
// // make it a daemon thread
// reindexThread.setDaemon(true);
// // it should not be a high priority
// reindexThread.setPriority(Thread.MIN_PRIORITY);
// // start it
// reindexThread.start();
//
// if (logger.isDebugEnabled())
// {
// logger.debug("Full index recovery thread started: \n" +
// " continuous: " + runContinuously);
// }
// }
// }
//
// /**
// * Stateful thread runnable that executes reindex calls.
// *
// * @see FullIndexRecoveryComponent#reindexNodes()
// *
// * @author Derek Hulley
// */
// private class ReindexRunner implements Runnable
// {
// public void run()
// {
// // keep this thread going permanently
// while (!killThread)
// {
// try
// {
// // reindex nodes
// List<String> txnsIndexed = FullIndexRecoveryComponent.this.reindexNodes();
// // reindex missing content
// @SuppressWarnings("unused")
// int missingContentCount = FullIndexRecoveryComponent.this.reindexMissingContent();
// // check if the process should terminate
// if (txnsIndexed.size() == 0 && !runContinuously)
// {
// // the thread has caught up with all the available work and should not
// // run continuously
// if (logger.isDebugEnabled())
// {
// logger.debug("Thread quitting - no more available indexing to do: \n" +
// " last txn: " + FullIndexRecoveryComponent.getCurrentTransactionId());
// }
// break;
// }
// // brief pause
// synchronized(FullIndexRecoveryComponent.this)
// {
// FullIndexRecoveryComponent.this.wait(waitTime);
// }
// }
// catch (InterruptedException e)
// {
// // ignore
// }
// catch (Throwable e)
// {
// if (killThread)
// {
// // the shutdown may have caused the exception - ignore it
// }
// else
// {
// // we are still a go; report it
// logger.error("Reindex failure", e);
// }
// }
// }
// }
// }
//
// /**
// * @return Returns the number of documents reindexed
// */
// private int reindexMissingContent()
// {
// int count = 0;
// for (StoreRef storeRef : storeRefs)
// {
// count += reindexMissingContent(storeRef);
// }
// return count;
// }
//
// /**
// * @param storeRef the store to check for missing content
// * @return Returns the number of documents reindexed
// */
// private int reindexMissingContent(StoreRef storeRef)
// {
// SearchParameters sp = new SearchParameters();
// sp.addStore(storeRef);
//
// // search for it in the index
// String query = "TEXT:" + LuceneIndexerImpl.NOT_INDEXED_CONTENT_MISSING;
// sp.setLanguage(SearchService.LANGUAGE_LUCENE);
// sp.setQuery(query);
// ResultSet results = null;
// try
// {
// results = searcher.query(sp);
//
// int count = 0;
// // loop over the results and get the details of the nodes that have missing content
// List<ChildAssociationRef> assocRefs = results.getChildAssocRefs();
// for (ChildAssociationRef assocRef : assocRefs)
// {
// final NodeRef childNodeRef = assocRef.getChildRef();
// // prompt for a reindex - it might fail again, but we just keep plugging away
// TransactionWork<Object> reindexWork = new TransactionWork<Object>()
// {
// public Object doWork()
// {
// indexer.updateNode(childNodeRef);
// return null;
// }
// };
// TransactionUtil.executeInNonPropagatingUserTransaction(transactionService, reindexWork);
// count++;
// }
// // done
// if (logger.isDebugEnabled())
// {
// logger.debug("Reindexed missing content: \n" +
// " store: " + storeRef + "\n" +
// " node count: " + count);
// }
// return count;
// }
// finally
// {
// if (results != null)
// {
// results.close();
// }
// }
// }
//
// /**
// * @return Returns the transaction ID just reindexed, i.e. where some work was performed
// */
// private List<String> reindexNodes()
// {
// // get a list of all transactions still requiring a check
// List<String> txnsToCheck = getNextChangeTxnIds(FullIndexRecoveryComponent.currentTxnId);
//
// // loop over each transaction
// for (String changeTxnId : txnsToCheck)
// {
// reindexNodes(changeTxnId);
// }
//
// // done
// return txnsToCheck;
// }
//
// /**
// * Reindexes changes specific to the change transaction ID.
// * <p>
// * <b>All exceptions are absorbed.</b>
// */
// private void reindexNodes(final String changeTxnId)
// {
// /*
// * This must execute each within its own transaction.
// * The cache size is therefore not an issue.
// */
// TransactionWork<Object> reindexWork = new TransactionWork<Object>()
// {
// public Object doWork() throws Exception
// {
// // perform the work in a Hibernate callback
// HibernateCallback callback = new ReindexCallback(changeTxnId);
// getHibernateTemplate().execute(callback);
// // done
// return null;
// }
// };
// try
// {
// TransactionUtil.executeInNonPropagatingUserTransaction(transactionService, reindexWork);
// }
// catch (Throwable e)
// {
// logger.error("Transaction reindex failed: \n" +
// " txn: " + changeTxnId,
// e);
// }
// finally
// {
// // Up the current transaction now, in case the process fails at this point.
// // This will prevent the transaction from being processed again.
// // This applies to failures as well, which should be dealt with externally
// // and having the entire process start again, e.g. such as a system reboot
// currentTxnId = changeTxnId;
// }
// }
//
// /**
// * Stateful inner class that implements a single reindex call for a given store
// * and transaction.
// * <p>
// * It must be called within its own transaction.
// *
// * @author Derek Hulley
// */
// private class ReindexCallback implements HibernateCallback
// {
// private final String changeTxnId;
//
// public ReindexCallback(String changeTxnId)
// {
// this.changeTxnId = changeTxnId;
// }
//
// /**
// * Changes the L2 cache usage before reindexing for each store
// *
// * @see #reindexNodes(StoreRef, String)
// */
// public Object doInHibernate(Session session)
// {
// // set the way the L2 cache is used
// getSession().setCacheMode(l2CacheMode);
//
// // reindex each store
// for (StoreRef storeRef : storeRefs)
// {
// if (!nodeService.exists(storeRef))
// {
// // the store is not present
// continue;
// }
// // reindex for store
// reindexNodes(storeRef, changeTxnId);
// }
// // done
// return null;
// }
//
// private void reindexNodes(StoreRef storeRef, String changeTxnId)
// {
// // check if we need to perform this operation
// SearchParameters sp = new SearchParameters();
// sp.addStore(storeRef);
//
// // search for it in the index
// String query = "TX:\"" + changeTxnId + "\"";
// sp.setLanguage(SearchService.LANGUAGE_LUCENE);
// sp.setQuery(query);
// ResultSet results = null;
// try
// {
// results = searcher.query(sp);
// // did the index have any of these changes?
// if (results.length() > 0)
// {
// // the transaction has an entry in the index - assume that it was
// // atomically correct
// if (logger.isDebugEnabled())
// {
// logger.debug("Transaction present in index - no indexing required: \n" +
// " store: " + storeRef + "\n" +
// " txn: " + changeTxnId);
// }
// return;
// }
// }
// finally
// {
// if (results != null)
// {
// results.close();
// }
// }
// // the index has no record of this
// // were there any changes, or is it all just deletions?
// int changedCount = getChangedNodeStatusesCount(storeRef, changeTxnId);
// if (changedCount == 0)
// {
// // no nodes were changed in the transaction, i.e. they are only deletions
// // the index is quite right not to have any entries for the transaction
// if (logger.isDebugEnabled())
// {
// logger.debug("Transaction only has deletions - no indexing required: \n" +
// " store: " + storeRef + "\n" +
// " txn: " + changeTxnId);
// }
// return;
// }
//
// // process the deletions relevant to the txn and the store
// List<NodeStatus> deletedNodeStatuses = getDeletedNodeStatuses(storeRef, changeTxnId);
// for (NodeStatus status : deletedNodeStatuses)
// {
// NodeRef nodeRef = new NodeRef(storeRef, status.getKey().getGuid());
// // only the child node ref is relevant
// ChildAssociationRef assocRef = new ChildAssociationRef(
// ContentModel.ASSOC_CHILDREN,
// null,
// null,
// nodeRef);
// indexer.deleteNode(assocRef);
// }
//
// // process additions
// List<NodeStatus> changedNodeStatuses = getChangedNodeStatuses(storeRef, changeTxnId);
// for (NodeStatus status : changedNodeStatuses)
// {
// NodeRef nodeRef = new NodeRef(storeRef, status.getKey().getGuid());
// // get the primary assoc for the node
// ChildAssociationRef primaryAssocRef = nodeService.getPrimaryParent(nodeRef);
// // reindex
// indexer.createNode(primaryAssocRef);
// }
//
// // done
// if (logger.isDebugEnabled())
// {
// logger.debug("Transaction reindexed: \n" +
// " store: " + storeRef + "\n" +
// " txn: " + changeTxnId + "\n" +
// " deletions: " + deletedNodeStatuses.size() + "\n" +
// " modifications: " + changedNodeStatuses.size());
// }
// }
// };
//
// /**
// * Retrieve all transaction IDs that are greater than the given transaction ID.
// *
// * @param currentTxnId the transaction ID that must be less than all returned results
// * @return Returns an ordered list of transaction IDs
// */
// @SuppressWarnings("unchecked")
// public List<String> getNextChangeTxnIds(final String currentTxnId)
// {
// HibernateCallback callback = new HibernateCallback()
// {
// public Object doInHibernate(Session session)
// {
// Query query = session.getNamedQuery(QUERY_GET_NEXT_CHANGE_TXN_IDS);
// query.setString("currentTxnId", currentTxnId)
// .setReadOnly(true);
// return query.list();
// }
// };
// List<String> queryResults = (List<String>) getHibernateTemplate().execute(callback);
// // done
// return queryResults;
// }
//
// @SuppressWarnings("unchecked")
// public int getChangedNodeStatusesCount(final StoreRef storeRef, final String changeTxnId)
// {
// HibernateCallback callback = new HibernateCallback()
// {
// public Object doInHibernate(Session session)
// {
// Query query = session.getNamedQuery(QUERY_GET_CHANGED_NODE_STATUSES_COUNT);
// query.setString("storeProtocol", storeRef.getProtocol())
// .setString("storeIdentifier", storeRef.getIdentifier())
// .setString("changeTxnId", changeTxnId)
// .setReadOnly(true);
// return query.uniqueResult();
// }
// };
// Integer changeCount = (Integer) getHibernateTemplate().execute(callback);
// // done
// return changeCount.intValue();
// }
//
// @SuppressWarnings("unchecked")
// public List<NodeStatus> getChangedNodeStatuses(final StoreRef storeRef, final String changeTxnId)
// {
// HibernateCallback callback = new HibernateCallback()
// {
// public Object doInHibernate(Session session)
// {
// Query query = session.getNamedQuery(QUERY_GET_CHANGED_NODE_STATUSES);
// query.setString("storeProtocol", storeRef.getProtocol())
// .setString("storeIdentifier", storeRef.getIdentifier())
// .setString("changeTxnId", changeTxnId)
// .setReadOnly(true);
// return query.list();
// }
// };
// List<NodeStatus> queryResults = (List) getHibernateTemplate().execute(callback);
// // done
// return queryResults;
// }
//
// @SuppressWarnings("unchecked")
// public List<NodeStatus> getDeletedNodeStatuses(final StoreRef storeRef, final String changeTxnId)
// {
// HibernateCallback callback = new HibernateCallback()
// {
// public Object doInHibernate(Session session)
// {
// Query query = session.getNamedQuery(QUERY_GET_DELETED_NODE_STATUSES);
// query.setString("storeProtocol", storeRef.getProtocol())
// .setString("storeIdentifier", storeRef.getIdentifier())
// .setString("changeTxnId", changeTxnId)
// .setReadOnly(true);
// return query.list();
// }
// };
// List<NodeStatus> queryResults = (List) getHibernateTemplate().execute(callback);
// // done
// return queryResults;
// }
//}

// ==== commit-view artifact: boundary of a deleted file (136 lines removed) follows ====
/*
* Copyright (C) 2005 Alfresco, Inc.
*
* Licensed under the Mozilla Public License version 1.1
* with a permitted attribution clause. You may obtain a
* copy of the License at
*
* http://www.alfresco.org/legal/license.txt
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*/
package org.alfresco.util.debug;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Performs writing to DEBUG of incoming arguments and outgoing results for a method call.<br>
* If the method invocation throws an exception, then the incoming arguments are
* logged to DEBUG as well.<br>
* The implementation adds very little overhead to a normal method
* call by only building log messages when required.
* <p>
* The logging is done against the logger retrieved using the names:
* <p>
* <pre>
* org.alfresco.util.debug.MethodCallLogAdvice
* AND
* targetClassName
* targetClassName.methodName
* targetClassName.methodName.exception
* </pre>
* <p>
* The following examples show how to control the log levels:
* <p>
* <pre>
* org.alfresco.util.debug.MethodCallLogAdvice=DEBUG # activate method logging
* AND
* x.y.MyClass=DEBUG # log debug for all method calls on MyClass
* x.y.MyClass.doSomething=DEBUG # log debug for all doSomething method calls
* x.y.MyClass.doSomething.exception=DEBUG # only log debug for doSomething() upon exception
* </pre>
* <p>
*
* @author Derek Hulley
*/
public class MethodCallLogAdvice implements MethodInterceptor
{
    /** Master switch logger: no per-call logging work happens unless this is at DEBUG */
    private static final Log logger = LogFactory.getLog(MethodCallLogAdvice.class);

    /**
     * Intercepts the method call, writing the arguments and result (or failure)
     * to DEBUG when logging is active for this advice.
     *
     * @param invocation the intercepted method invocation
     * @return Returns the result of the target method call
     * @throws Throwable rethrows any failure from the target method, unchanged
     */
    public Object invoke(MethodInvocation invocation) throws Throwable
    {
        if (logger.isDebugEnabled())
        {
            return invokeWithLogging(invocation);
        }
        else
        {
            // no logging required - proceed without building any messages
            return invocation.proceed();
        }
    }

    /**
     * Executes the invocation with logging.  Messages are only constructed when
     * the per-method logger (<i>className.methodName</i>) or, on failure, the
     * per-method exception logger (<i>className.methodName.exception</i>) has
     * DEBUG enabled, keeping the overhead of this advice minimal.
     *
     * @param invocation the intercepted method invocation
     * @return Returns the result of the target method call
     * @throws Throwable rethrows any failure from the target method, unchanged
     */
    private Object invokeWithLogging(MethodInvocation invocation) throws Throwable
    {
        String methodName = invocation.getMethod().getName();
        String className = invocation.getMethod().getDeclaringClass().getName();
        // execute as normal
        try
        {
            Object ret = invocation.proceed();
            // success logging goes to the class.method logger so that output
            // can be switched on per method
            Log methodLogger = LogFactory.getLog(className + "." + methodName);
            if (methodLogger.isDebugEnabled())
            {
                StringBuilder sb = getInvocationInfo(className, methodName, invocation.getArguments());
                sb.append(" Result: ").append(ret);
                methodLogger.debug(sb);
            }
            // done
            return ret;
        }
        catch (Throwable e)
        {
            // failures log to a separate .exception logger so they can be
            // enabled independently of successful-call logging
            Log exceptionLogger = LogFactory.getLog(className + "." + methodName + ".exception");
            if (exceptionLogger.isDebugEnabled())
            {
                StringBuilder sb = getInvocationInfo(className, methodName, invocation.getArguments());
                sb.append(" Failure: ").append(e.getClass().getName()).append(" - ").append(e.getMessage());
                exceptionLogger.debug(sb);
            }
            // rethrow so callers see the original failure
            throw e;
        }
    }

    /**
     * Builds the common invocation description.  Return format:
     * <pre>
     * Method: className#methodName
     *   Argument: arg0
     *   Argument: arg1
     *   ...
     *   Argument: argN {newline}
     * </pre>
     * A StringBuilder is used rather than a StringBuffer as the buffer is
     * confined to the calling thread and needs no synchronization.
     *
     * @param className the declaring class of the invoked method
     * @param methodName the name of the invoked method
     * @param args the invocation arguments
     * @return Returns a StringBuilder containing the details of a method call
     */
    private StringBuilder getInvocationInfo(String className, String methodName, Object[] args)
    {
        // presized to avoid early re-allocations for typical messages
        StringBuilder sb = new StringBuilder(250);
        sb.append("\nMethod: ").append(className).append("#").append(methodName).append("\n");
        sb.append(" Transaction: ").append(AlfrescoTransactionSupport.getTransactionId()).append("\n");
        for (Object arg : args)
        {
            sb.append(" Argument: ").append(arg).append("\n");
        }
        return sb;
    }
}