Merged V1.4 to HEAD

svn merge svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@4133 svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@4145 .
   svn merge svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@4147 svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@4148 .
   svn merge svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@4151 svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@4152 .
   svn merge svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@4157 svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@4159 .
   svn merge svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@4161 svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@4162 .
   svn merge svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@4169 svn://svn.alfresco.com:3691/alfresco/BRANCHES/V1.4@4175 .
   
   Skipped:
      4146, 4151, 4153, 4156, 4157, 4160, 4163-4167 (inclusive)
   Last included:
      4175



git-svn-id: https://svn.alfresco.com/repos/alfresco-enterprise/alfresco/HEAD/root@4176 c4b6b30b-aa2e-2d43-bbcb-ca4b014f7261
This commit is contained in:
Derek Hulley
2006-10-20 02:03:05 +00:00
parent be167f60cf
commit ed140c671f
17 changed files with 235 additions and 62 deletions

View File

@@ -1,4 +0,0 @@
--
-- Insert post-creation scripts here
-- This is specific to the dialect described in the path to the file
--

View File

@@ -16,6 +16,12 @@ CREATE INDEX FKE1A550BCA8FC7769 ON alf_node_assoc (target_node_id);
CREATE INDEX FK71C2002B7F2C8017 ON alf_node_status (node_id);
CREATE INDEX FKBD4FF53D22DBA5BA ON alf_store (root_node_id);
--
-- Transaction tables
--
CREATE INDEX FK71C2002B9E57C13D ON alf_node_status (transaction_id);
CREATE INDEX FKB8761A3A9AE340B7 ON alf_transaction (server_id);
--
-- New audit tables
--

View File

@@ -0,0 +1,30 @@
--
-- Add post-creation indexes. (SQL Server Schema 1.4)
--
CREATE INDEX FKFFF41F9960601995 ON alf_access_control_entry (permission_id);
CREATE INDEX FKFFF41F99B25A50BF ON alf_access_control_entry (authority_id);
CREATE INDEX FKFFF41F99B9553F6C ON alf_access_control_entry (acl_id);
CREATE INDEX FK8A749A657B7FDE43 ON alf_auth_ext_keys (id);
CREATE INDEX FKFFC5468E74173FF4 ON alf_child_assoc (child_node_id);
CREATE INDEX FKFFC5468E8E50E582 ON alf_child_assoc (parent_node_id);
CREATE INDEX FK60EFB626B9553F6C ON alf_node (acl_id);
CREATE INDEX FK60EFB626D24ADD25 ON alf_node (protocol, identifier);
CREATE INDEX FK7D4CF8EC7F2C8017 ON alf_node_properties (node_id);
CREATE INDEX FKD654E027F2C8017 ON alf_node_aspects (node_id);
CREATE INDEX FKE1A550BCB69C43F3 ON alf_node_assoc (source_node_id);
CREATE INDEX FKE1A550BCA8FC7769 ON alf_node_assoc (target_node_id);
CREATE INDEX FK71C2002B7F2C8017 ON alf_node_status (node_id);
CREATE INDEX FKBD4FF53D22DBA5BA ON alf_store (root_node_id);
--
-- Transaction tables
--
CREATE INDEX FK71C2002B9E57C13D ON alf_node_status (transaction_id);
CREATE INDEX FKB8761A3A9AE340B7 ON alf_transaction (server_id);
--
-- New audit tables
--
CREATE INDEX FKEAD1817484342E39 ON alf_audit_fact (audit_date_id);
CREATE INDEX FKEAD18174A0F9B8D9 ON alf_audit_fact (audit_source_id);
CREATE INDEX FKEAD18174F524CFD7 ON alf_audit_fact (audit_conf_id);

View File

@@ -1,5 +1,5 @@
-- ------------------------------------------------------
-- Alfresco Schema conversion V1.3 to V1.4 Part 1
-- Alfresco Schema conversion V1.3 to V1.4 Part 1 (MySQL)
--
-- Adds the columns required to enforce the duplicate name detection
--
@@ -25,6 +25,11 @@ DROP TABLE IF EXISTS T_permission;
DROP TABLE IF EXISTS T_store;
DROP TABLE IF EXISTS T_version_count;
--
-- Upgrades to 1.3 using MyISAM tables could have missed converting the applied_patch table to InnoDB
--
ALTER TABLE applied_patch ENGINE = InnoDB;
--
-- Unique name constraint
--
@@ -47,20 +52,70 @@ ALTER TABLE node_assoc
--
-- Rename tables to give 'alf_' prefix
--
ALTER TABLE access_control_entry RENAME TO alf_access_control_entry;
ALTER TABLE access_control_list RENAME TO alf_access_control_list;
ALTER TABLE applied_patch RENAME TO alf_applied_patch;
ALTER TABLE auth_ext_keys RENAME TO alf_auth_ext_keys;
ALTER TABLE authority RENAME TO alf_authority;
ALTER TABLE child_assoc RENAME TO alf_child_assoc;
ALTER TABLE node RENAME TO alf_node;
ALTER TABLE node_aspects RENAME TO alf_node_aspects;
ALTER TABLE node_assoc RENAME TO alf_node_assoc;
ALTER TABLE node_properties RENAME TO alf_node_properties;
ALTER TABLE node_status RENAME TO alf_node_status;
ALTER TABLE permission RENAME TO alf_permission;
ALTER TABLE store RENAME TO alf_store;
ALTER TABLE version_count RENAME TO alf_version_count;
ALTER TABLE access_control_entry RENAME TO alf_access_control_entry;
ALTER TABLE access_control_list RENAME TO alf_access_control_list;
ALTER TABLE applied_patch RENAME TO alf_applied_patch;
ALTER TABLE auth_ext_keys RENAME TO alf_auth_ext_keys;
ALTER TABLE authority RENAME TO alf_authority;
ALTER TABLE child_assoc RENAME TO alf_child_assoc;
ALTER TABLE node RENAME TO alf_node;
ALTER TABLE node_aspects RENAME TO alf_node_aspects;
ALTER TABLE node_assoc RENAME TO alf_node_assoc;
ALTER TABLE node_properties RENAME TO alf_node_properties;
ALTER TABLE node_status RENAME TO alf_node_status;
ALTER TABLE permission RENAME TO alf_permission;
ALTER TABLE store RENAME TO alf_store;
ALTER TABLE version_count RENAME TO alf_version_count;
--
-- The table renames will cause Hibernate to rehash the FK constraint names.
-- For MySQL, Hibernate will generate scripts to add the appropriate constraints
-- and indexes.
--
ALTER TABLE alf_access_control_entry
DROP FOREIGN KEY FKF064DF7560601995,
DROP INDEX FKF064DF7560601995,
DROP FOREIGN KEY FKF064DF75B25A50BF,
DROP INDEX FKF064DF75B25A50BF,
DROP FOREIGN KEY FKF064DF75B9553F6C,
DROP INDEX FKF064DF75B9553F6C;
ALTER TABLE alf_auth_ext_keys
DROP FOREIGN KEY FK31D3BA097B7FDE43,
DROP INDEX FK31D3BA097B7FDE43;
ALTER TABLE alf_child_assoc
DROP FOREIGN KEY FKC6EFFF3274173FF4,
DROP INDEX FKC6EFFF3274173FF4,
DROP FOREIGN KEY FKC6EFFF328E50E582,
DROP INDEX FKC6EFFF328E50E582;(optional)
ALTER TABLE alf_child_assoc
DROP FOREIGN KEY FKFFC5468E74173FF4,
DROP INDEX FKFFC5468E74173FF4,
DROP FOREIGN KEY FKFFC5468E8E50E582,
DROP INDEX FKFFC5468E8E50E582;(optional)
ALTER TABLE alf_node
DROP FOREIGN KEY FK33AE02B9553F6C,
DROP INDEX FK33AE02B9553F6C;
ALTER TABLE alf_node
DROP FOREIGN KEY FK33AE02D24ADD25,
DROP INDEX FK33AE02D24ADD25;
ALTER TABLE alf_node_properties
DROP FOREIGN KEY FKC962BF907F2C8017,
DROP INDEX FKC962BF907F2C8017;
ALTER TABLE alf_node_aspects
DROP FOREIGN KEY FK2B91A9DE7F2C8017,
DROP INDEX FK2B91A9DE7F2C8017;
ALTER TABLE alf_node_assoc
DROP FOREIGN KEY FK5BAEF398B69C43F3,
DROP INDEX FK5BAEF398B69C43F3;
ALTER TABLE alf_node_assoc
DROP FOREIGN KEY FK5BAEF398A8FC7769,
DROP INDEX FK5BAEF398A8FC7769;
ALTER TABLE alf_node_status
DROP FOREIGN KEY FK38ECB8CF7F2C8017,
DROP INDEX FK38ECB8CF7F2C8017;
ALTER TABLE alf_store
DROP FOREIGN KEY FK68AF8E122DBA5BA,
DROP INDEX FK68AF8E122DBA5BA;
--
-- Record script finish

View File

@@ -1,5 +1,5 @@
-- ------------------------------------------------------
-- Alfresco Schema conversion V1.3 to V1.4 Part 2
-- Alfresco Schema conversion V1.3 to V1.4 Part 2 (MySQL)
--
-- Adds the alf_transaction and alf_server tables to keep track of the sources
-- of transactions.
@@ -25,7 +25,6 @@ CREATE TABLE alf_transaction (
change_txn_id varchar(56) NOT NULL,
PRIMARY KEY (id),
KEY FKB8761A3A9AE340B7 (server_id),
KEY IDX_CHANGE_TXN (change_txn_id),
CONSTRAINT FKB8761A3A9AE340B7 FOREIGN KEY (server_id) REFERENCES alf_server (id)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
insert into alf_transaction
@@ -44,7 +43,8 @@ UPDATE alf_node_status ns SET ns.transaction_id =
);
ALTER TABLE alf_node_status
DROP COLUMN change_txn_id,
ADD CONSTRAINT FK71C2002B9E57C13D FOREIGN KEY (transaction_id) REFERENCES alf_transaction (id);
ADD CONSTRAINT FK71C2002B9E57C13D FOREIGN KEY (transaction_id) REFERENCES alf_transaction (id),
ADD INDEX FK71C2002B9E57C13D (transaction_id);
--
-- Record script finish

View File

@@ -27,7 +27,6 @@ create table alf_transaction
change_txn_id varchar2(56 char) not null,
primary key (id)
);
create index CHANGE_TXN_ID on alf_transaction (change_txn_id);
alter table alf_transaction add constraint FKB8761A3A9AE340B7 foreign key (server_id) references alf_server;
create index FKB8761A3A9AE340B7 on alf_transaction (server_id);

View File

@@ -9,7 +9,13 @@
#db.pool.max=100
#
# MySQL connection (This is default and requires mysql-connector-java-3.1.12-bin.jar, which ships with the Alfresco server)
# HSQL connection
#
#db.driver=org.hsqldb.jdbcDriver
#db.url=jdbc:hsqldb:file:alf_data/hsql_data/alfresco;ifexists=true;shutdown=true;
#
# MySQL connection (This is default and requires mysql-connector-java-5.0.3-bin.jar, which ships with the Alfresco server)
#
#db.driver=org.gjt.mm.mysql.Driver
#db.url=jdbc:mysql://localhost/alfresco

View File

@@ -7,6 +7,11 @@
# For a full list: http://www.hibernate.org/hib_docs/v3/reference/en/html_single/#configuration-optional-dialects
#
#
# HSQL dialect
#
#hibernate.dialect=org.hibernate.dialect.HSQLDialect
#
# MySQL dialect (default)
#

View File

@@ -96,6 +96,6 @@ patch.schemaUpgradeScript.description=Ensures that the database upgrade script h
patch.schemaUpgradeScript.err.not_executed=The schema upgrade script, ''{0}'', has not been run against this database.
patch.uniqueChildName.description=Checks and renames duplicate children.
patch.uniqueChildName.copyOf=({0})
patch.uniqueChildName.copyOf=({0}-{1})
patch.uniqueChildName.result=Checked {0} associations and fixed {1} duplicates. See file {2} for details.
patch.uniqueChildName.err.unable_to_fix=Auto-fixing of duplicate names failed. See file {0} for details.

View File

@@ -1,6 +1,7 @@
# Schema update messages
schema.update.msg.executing_script=Executing database script: {0}
schema.update.msg.optional_statement_failed=Optional statement execution failed:\n SQL: {0}\n Error: {1}\n File: {2}\n Line: {3}
schema.update.err.statement_failed=Statement execution failed:\n SQL: {0}\n Error: {1}\n File: {2}\n Line: {3}
schema.update.err.update_failed=Schema auto-update failed
schema.update.err.validation_failed=Schema validation failed

View File

@@ -21,6 +21,7 @@ import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Collection;
import java.util.Date;
import java.util.List;
@@ -30,6 +31,7 @@ import org.alfresco.repo.admin.patch.AbstractPatch;
import org.alfresco.repo.domain.ChildAssoc;
import org.alfresco.repo.domain.Node;
import org.alfresco.repo.node.db.NodeDaoService;
import org.alfresco.service.cmr.admin.PatchException;
import org.alfresco.service.cmr.dictionary.AssociationDefinition;
import org.alfresco.service.cmr.dictionary.ChildAssociationDefinition;
import org.alfresco.service.cmr.dictionary.DictionaryService;
@@ -51,6 +53,7 @@ import org.springframework.orm.hibernate3.support.HibernateDaoSupport;
public class UniqueChildNamePatch extends AbstractPatch
{
private static final String MSG_SUCCESS = "patch.uniqueChildName.result";
private static final String ERR_UNABLE_TO_FIX = "patch.uniqueChildName.err.unable_to_fix";
private static final String MSG_COPY_OF = "patch.uniqueChildName.copyOf";
/** the number of associations to process at a time */
private static final int MAX_RESULTS = 1000;
@@ -143,6 +146,7 @@ public class UniqueChildNamePatch extends AbstractPatch
@SuppressWarnings("unused")
List<QName> assocTypeQNames = getUsedAssocQNames();
boolean unableToFix = false;
int fixed = 0;
int processed = 0;
// check loop through all associations, looking for duplicates
@@ -185,8 +189,10 @@ public class UniqueChildNamePatch extends AbstractPatch
String usedChildName = childName;
processed++;
boolean duplicate = false;
int duplicateNumber = 0;
while(true)
{
duplicateNumber++;
try
{
// push the name back to the node
@@ -195,11 +201,46 @@ public class UniqueChildNamePatch extends AbstractPatch
}
catch (DuplicateChildNodeNameException e)
{
// there was a duplicate, so adjust the name and change the node property
duplicate = true;
// assign a new name
usedChildName = childName + I18NUtil.getMessage(MSG_COPY_OF, processed);
// try again
if (duplicateNumber == 10)
{
// Try removing the secondary parent associations
writeLine(" Removing secondary parents of node " + childNode.getId());
Collection<ChildAssoc> parentAssocs = childNode.getParentAssocs();
for (ChildAssoc parentAssoc : parentAssocs)
{
if (!parentAssoc.getIsPrimary())
{
write(" - ").writeLine(parentAssoc);
// remove it
getSession().delete(parentAssoc);
}
}
// flush to ensure the database gets the changes
getSession().flush();
// try again to be sure
continue;
}
else if (duplicateNumber > 10)
{
// after 10 attempts, we have to admit defeat. Perhaps there is a larger issue.
Collection<ChildAssoc> parentAssocs = childNode.getParentAssocs();
write(" Unable to set child name '" + usedChildName + "' for node " + childNode.getId());
writeLine(" with parent associations:");
for (ChildAssoc parentAssoc : parentAssocs)
{
write(" - ").writeLine(parentAssoc);
}
duplicate = false;
unableToFix = true;
break;
}
else
{
// there was a duplicate, so adjust the name and change the node property
duplicate = true;
// assign a new name
usedChildName = childName + I18NUtil.getMessage(MSG_COPY_OF, processed, duplicateNumber);
}
}
}
// if duplicated, report it
@@ -209,11 +250,11 @@ public class UniqueChildNamePatch extends AbstractPatch
// get the node path
NodeRef parentNodeRef = childAssoc.getParent().getNodeRef();
Path path = nodeService.getPath(parentNodeRef);
writeLine(" Changed duplicated child name:");
writeLine(" Parent: " + parentNodeRef);
writeLine(" Parent path: " + path);
writeLine(" Duplicate name: " + childName);
writeLine(" Replaced with: " + usedChildName);
writeLine(" Changed duplicated child name:");
writeLine(" Parent: " + parentNodeRef);
writeLine(" Parent path: " + path);
writeLine(" Duplicate name: " + childName);
writeLine(" Replaced with: " + usedChildName);
}
}
// clear the session to preserve memory
@@ -222,10 +263,17 @@ public class UniqueChildNamePatch extends AbstractPatch
}
}
// build the result message
String msg = I18NUtil.getMessage(MSG_SUCCESS, processed, fixed, logFile);
return msg;
// check if it was successful or not
if (unableToFix)
{
throw new PatchException(ERR_UNABLE_TO_FIX, logFile);
}
else
{
// build the result message
String msg = I18NUtil.getMessage(MSG_SUCCESS, processed, fixed, logFile);
return msg;
}
}
@SuppressWarnings("unchecked")

View File

@@ -25,7 +25,6 @@ import org.alfresco.repo.domain.ChildAssoc;
import org.alfresco.repo.domain.Node;
import org.alfresco.service.cmr.repository.ChildAssociationRef;
import org.alfresco.service.namespace.QName;
import org.alfresco.util.EqualsHelper;
/**
* @author Derek Hulley
@@ -124,9 +123,12 @@ public class ChildAssocImpl implements ChildAssoc, Serializable
{
StringBuffer sb = new StringBuffer(32);
sb.append("ChildAssoc")
.append("[ parent=").append(parent)
.append(", child=").append(child)
.append("[ id=").append(id)
.append(", parent=").append(parent.getId())
.append(", child=").append(child.getId())
.append(", child name=").append(childNodeName)
.append(", child name crc=").append(childNodeNameCrc)
.append(", assoc type=").append(getTypeQName())
.append(", assoc name=").append(getQname())
.append(", isPrimary=").append(isPrimary)
.append("]");

View File

@@ -29,7 +29,7 @@
unique="false"
not-null="false"
cascade="none" />
<property name="changeTxnId" column="change_txn_id" type="string" length="56" not-null="true" index="CHANGE_TXN_ID"/>
<property name="changeTxnId" column="change_txn_id" type="string" length="56" not-null="true" />
</class>
<class

View File

@@ -67,6 +67,7 @@ public class SchemaBootstrap extends AbstractLifecycleBean
private static final String PLACEHOLDER_SCRIPT_DIALECT = "\\$\\{db\\.script\\.dialect\\}";
private static final String MSG_EXECUTING_SCRIPT = "schema.update.msg.executing_script";
private static final String MSG_OPTIONAL_STATEMENT_FAILED = "schema.update.msg.optional_statement_failed";
private static final String ERR_STATEMENT_FAILED = "schema.update.err.statement_failed";
private static final String ERR_UPDATE_FAILED = "schema.update.err.update_failed";
private static final String ERR_VALIDATION_FAILED = "schema.update.err.validation_failed";
@@ -504,10 +505,18 @@ public class SchemaBootstrap extends AbstractLifecycleBean
}
// have we reached the end of a statement?
boolean execute = false;
boolean optional = false;
if (sql.endsWith(";"))
{
sql = sql.substring(0, sql.length() - 1);
execute = true;
optional = false;
}
else if (sql.endsWith(";(optional)"))
{
sql = sql.substring(0, sql.length() - 11);
execute = true;
optional = true;
}
// append to the statement being built up
sb.append(" ").append(sql);
@@ -515,7 +524,7 @@ public class SchemaBootstrap extends AbstractLifecycleBean
if (execute)
{
sql = sb.toString();
executeStatement(connection, sql, line, scriptFile);
executeStatement(connection, sql, optional, line, scriptFile);
sb = new StringBuilder(1024);
}
}
@@ -531,7 +540,7 @@ public class SchemaBootstrap extends AbstractLifecycleBean
* Execute the given SQL statement, absorbing exceptions that we expect during
* schema creation or upgrade.
*/
private void executeStatement(Connection connection, String sql, int line, File file) throws Exception
private void executeStatement(Connection connection, String sql, boolean optional, int line, File file) throws Exception
{
Statement stmt = connection.createStatement();
try
@@ -544,10 +553,18 @@ public class SchemaBootstrap extends AbstractLifecycleBean
}
catch (SQLException e)
{
String msg = I18NUtil.getMessage(ERR_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line);
// ignore exceptions generated by the creation of indexes that already exist
logger.error(msg);
throw e;
if (optional)
{
// it was marked as optional, so we just ignore it
String msg = I18NUtil.getMessage(MSG_OPTIONAL_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line);
logger.warn(msg);
}
else
{
String err = I18NUtil.getMessage(ERR_STATEMENT_FAILED, sql, e.getMessage(), file.getAbsolutePath(), line);
logger.error(err);
throw e;
}
}
finally
{

View File

@@ -158,7 +158,7 @@ public class CategoryNode extends Node
}
@Override
public boolean isCategory()
public boolean getIsCategory()
{
return true;
}

View File

@@ -427,7 +427,7 @@ public class Node implements Serializable, Scopeable
/**
* @return true if this Node is a container (i.e. a folder)
*/
public boolean isContainer()
public boolean getIsContainer()
{
if (isContainer == null)
{
@@ -441,13 +441,13 @@ public class Node implements Serializable, Scopeable
public boolean jsGet_isContainer()
{
return isContainer();
return getIsContainer();
}
/**
* @return true if this Node is a Document (i.e. with content)
*/
public boolean isDocument()
public boolean getIsDocument()
{
if (isDocument == null)
{
@@ -460,13 +460,13 @@ public class Node implements Serializable, Scopeable
public boolean jsGet_isDocument()
{
return isDocument();
return getIsDocument();
}
/**
* @return true if the Node is a Category
*/
public boolean isCategory()
public boolean getIsCategory()
{
// this value is overridden by the CategoryNode sub-class
return false;
@@ -474,7 +474,7 @@ public class Node implements Serializable, Scopeable
public boolean jsGet_isCategory()
{
return isCategory();
return getIsCategory();
}
/**
@@ -567,7 +567,7 @@ public class Node implements Serializable, Scopeable
{
if (this.imageResolver != null)
{
if (isDocument())
if (getIsDocument())
{
return this.imageResolver.resolveImagePathForName(getName(), true);
}
@@ -594,7 +594,7 @@ public class Node implements Serializable, Scopeable
{
if (this.imageResolver != null)
{
if (isDocument())
if (getIsDocument())
{
return this.imageResolver.resolveImagePathForName(getName(), false);
}
@@ -740,7 +740,7 @@ public class Node implements Serializable, Scopeable
*/
public String getUrl()
{
if (isDocument() == true)
if (getIsDocument() == true)
{
try
{
@@ -1646,7 +1646,7 @@ public class Node implements Serializable, Scopeable
this.imageResolver);
// add the current node as either the document/space as appropriate
if (this.isDocument())
if (this.getIsDocument())
{
model.put("document", new TemplateNode(this.nodeRef, this.services, this.imageResolver));
model.put("space", new TemplateNode(getPrimaryParentAssoc().getParentRef(), this.services, this.imageResolver));

View File

@@ -834,8 +834,16 @@ public class IndexInfo
// luceneIndexer.flushPending();
IndexReader deltaReader = buildAndRegisterDeltaReader(id);
IndexReader reader = new MultiReader(new IndexReader[] {
IndexReader reader = null;
if (deletions == null || deletions.size() == 0)
{
reader = new MultiReader(new IndexReader[] {mainIndexReader, deltaReader });
}
else
{
reader = new MultiReader(new IndexReader[] {
new FilterIndexReaderByNodeRefs2(mainIndexReader, deletions, deleteOnlyNodes), deltaReader });
}
reader = ReferenceCountingReadOnlyIndexReaderFactory.createReader("MainReader"+id, reader);
ReferenceCounting refCounting = (ReferenceCounting)reader;
refCounting.incrementReferenceCount();