/*
 * Copyright (C) 2005-2012 Alfresco Software Limited.
 *
 * This file is part of Alfresco
 *
 * Alfresco is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Alfresco is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
 */
package org.alfresco.repo.content.metadata;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Serializable;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.TimeZone;

import org.alfresco.repo.content.MimetypeMap;
import org.alfresco.repo.content.filestore.FileContentReader;
import org.alfresco.service.cmr.repository.ContentReader;
import org.alfresco.service.cmr.repository.ContentWriter;
import org.alfresco.service.cmr.repository.datatype.DefaultTypeConverter;
import org.alfresco.service.cmr.repository.datatype.TypeConversionException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.tika.embedder.Embedder;
import org.apache.tika.io.TemporaryResources;
import org.apache.tika.io.TikaInputStream;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.mime.MediaType;
import org.apache.tika.parser.ParseContext;
import org.apache.tika.parser.Parser;
import org.apache.tika.sax.ContentHandlerDecorator;
import org.apache.tika.sax.XHTMLContentHandler;
import org.apache.tika.sax.xpath.Matcher;
import org.apache.tika.sax.xpath.MatchingContentHandler;
import org.apache.tika.sax.xpath.XPathParser;
import org.xml.sax.Attributes;
import org.xml.sax.ContentHandler;
import org.xml.sax.Locator;
import org.xml.sax.SAXException;

/**
 * The parent of all Metadata Extractors which use
 * Apache Tika under the hood.
 * This handles all the common parts of processing the
 * files, and the common mappings.
 * Individual extractors extend from this to do custom
 * mappings.
 *
 * <pre>
 *   <b>author:</b>   -- cm:author
 *   <b>title:</b>    -- cm:title
 *   <b>subject:</b>  -- cm:description
 *   <b>created:</b>  -- cm:created
 *   <b>comments:</b>
 * </pre>
 *
 * @since 3.4
 * @author Nick Burch
 */
public abstract class TikaPoweredMetadataExtracter
        extends AbstractMappingMetadataExtracter
        implements MetadataEmbedder
{
    protected static Log logger = LogFactory.getLog(TikaPoweredMetadataExtracter.class);

    protected static final String KEY_AUTHOR = "author";
    protected static final String KEY_TITLE = "title";
    protected static final String KEY_SUBJECT = "subject";
    protected static final String KEY_CREATED = "created";
    protected static final String KEY_DESCRIPTION = "description";
    protected static final String KEY_COMMENTS = "comments";

    private DateFormat[] tikaDateFormats;

    /**
     * Builds up a list of supported mime types by merging an explicit
     * list with any that Tika also claims to support
     */
    protected static ArrayList<String> buildSupportedMimetypes(String[] explicitTypes, Parser... tikaParsers)
    {
        ArrayList<String> types = new ArrayList<String>();
        for(String type : explicitTypes)
        {
            if(!types.contains(type))
            {
                types.add(type);
            }
        }
        if(tikaParsers != null)
        {
            for(Parser tikaParser : tikaParsers)
            {
                for(MediaType mt : tikaParser.getSupportedTypes(new ParseContext()))
                {
                    String type = mt.toString();
                    if(!types.contains(type))
                    {
                        types.add(type);
                    }
                }
            }
        }
        return types;
    }
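
    // Illustrative sketch only (MyPdfExtracter and the parser choice are hypothetical,
    // not part of this class): a subclass typically merges its explicit list with the
    // types a Tika parser reports, then hands the result to a constructor below.
    //
    //     private static final String[] TYPES = { MimetypeMap.MIMETYPE_PDF };
    //
    //     public MyPdfExtracter()
    //     {
    //         super(buildSupportedMimetypes(TYPES, new org.apache.tika.parser.pdf.PDFParser()));
    //     }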

    public TikaPoweredMetadataExtracter(ArrayList<String> supportedMimeTypes)
    {
        this(new HashSet<String>(supportedMimeTypes), null);
    }
    public TikaPoweredMetadataExtracter(ArrayList<String> supportedMimeTypes, ArrayList<String> supportedEmbedMimeTypes)
    {
        this(new HashSet<String>(supportedMimeTypes), new HashSet<String>(supportedEmbedMimeTypes));
    }
    public TikaPoweredMetadataExtracter(HashSet<String> supportedMimeTypes)
    {
        this(supportedMimeTypes, null);
    }
    public TikaPoweredMetadataExtracter(HashSet<String> supportedMimeTypes, HashSet<String> supportedEmbedMimeTypes)
    {
        super(supportedMimeTypes, supportedEmbedMimeTypes);

        // TODO Once TIKA-451 is fixed this list will get nicer
        this.tikaDateFormats = new DateFormat[] {
            new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"),
            new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'", Locale.US),
            new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ"),
            new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.US),
            new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss"),
            new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss", Locale.US),
            new SimpleDateFormat("yyyy-MM-dd"),
            new SimpleDateFormat("yyyy-MM-dd", Locale.US),
            new SimpleDateFormat("yyyy/MM/dd HH:mm:ss"),
            new SimpleDateFormat("yyyy/MM/dd HH:mm:ss", Locale.US),
            new SimpleDateFormat("yyyy/MM/dd"),
            new SimpleDateFormat("yyyy/MM/dd", Locale.US),
            new SimpleDateFormat("EEE MMM dd hh:mm:ss zzz yyyy"),
            new SimpleDateFormat("EEE MMM dd hh:mm:ss zzz yyyy", Locale.US)
        };
        // Set the timezone on the UTC based formats
        for(DateFormat df : this.tikaDateFormats)
        {
            if(df instanceof SimpleDateFormat)
            {
                SimpleDateFormat sdf = (SimpleDateFormat)df;
                if(sdf.toPattern().endsWith("'Z'"))
                {
                    sdf.setTimeZone(TimeZone.getTimeZone("UTC"));
                }
            }
        }
    }

    /**
     * Version which also tries the ISO-8601 formats (in order),
     * and similar formats, which Tika makes use of
     */
    @Override
    protected Date makeDate(String dateStr)
    {
        // Try our formats first, in order
        for(DateFormat df : this.tikaDateFormats)
        {
            try
            {
                return df.parse(dateStr);
            }
            catch (ParseException ee)
            {
                // Didn't work
            }
        }

        // Fall back to the normal ones
        return super.makeDate(dateStr);
    }
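
    // For example, a Tika-supplied value such as "2012-11-22T10:15:30Z" would be
    // matched by the first (UTC) pattern above; unrecognised strings fall through
    // to super.makeDate(dateStr). (The sample value here is illustrative only.)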

    /**
     * Returns the correct Tika Parser to process
     * the document.
     * If you don't know which you want, use
     * {@link TikaAutoMetadataExtracter} which
     * makes use of the Tika auto-detection.
     */
    protected abstract Parser getParser();

    /**
     * Returns the Tika Embedder to modify
     * the document.
     *
     * @return the Tika embedder
     */
    protected Embedder getEmbedder()
    {
        // TODO make this an abstract method once more extracters support embedding
        return null;
    }
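
    // Sketch of a hypothetical subclass (not part of this class): embedding support is
    // switched on by overriding getEmbedder() to return a concrete
    // org.apache.tika.embedder.Embedder, which embedInternal() below then uses.
    //
    //     @Override
    //     protected Embedder getEmbedder()
    //     {
    //         return new MyTikaEmbedder(); // hypothetical Embedder implementation
    //     }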

    /**
     * Do we care about the contents of the
     * extracted header, or nothing at all?
     */
    protected boolean needHeaderContents()
    {
        return false;
    }

    /**
     * Allows implementation specific mappings
     * to be done.
     */
    protected Map<String, Serializable> extractSpecific(Metadata metadata,
            Map<String, Serializable> properties, Map<String,String> headers)
    {
        return properties;
    }

    /**
     * There seems to be some sort of issue with some downstream
     * 3rd party libraries, and input streams that come from
     * a {@link ContentReader}. This happens most often with
     * JPEG and Tiff files.
     * For these cases, buffer out to a local file if not
     * already there
     */
    protected InputStream getInputStream(ContentReader reader) throws IOException {
        // Prefer the File if available, it's generally quicker
        if(reader instanceof FileContentReader)
        {
            return TikaInputStream.get( ((FileContentReader)reader).getFile() );
        }

        // Grab the InputStream for the Content
        InputStream input = reader.getContentInputStream();

        // Images currently always require a file
        if(MimetypeMap.MIMETYPE_IMAGE_JPEG.equals(reader.getMimetype()) ||
           MimetypeMap.MIMETYPE_IMAGE_TIFF.equals(reader.getMimetype()))
        {
            TemporaryResources tmp = new TemporaryResources();
            TikaInputStream stream = TikaInputStream.get(input, tmp);
            stream.getFile(); // Have it turned into File backed
            return stream;
        }
        else
        {
            // The regular Content InputStream should be fine
            return input;
        }
    }

    @SuppressWarnings("deprecation")
    @Override
    protected Map<String, Serializable> extractRaw(ContentReader reader) throws Throwable
    {
        Map<String, Serializable> rawProperties = newRawMap();

        InputStream is = null;
        try
        {
            is = getInputStream(reader);
            Parser parser = getParser();
            ParseContext context = new ParseContext();

            Metadata metadata = new Metadata();
            metadata.add(Metadata.CONTENT_TYPE, reader.getMimetype());

            ContentHandler handler;
            Map<String,String> headers = null;
            if(needHeaderContents())
            {
                MapCaptureContentHandler headerCapture =
                    new MapCaptureContentHandler();
                headers = headerCapture.tags;
                handler = new HeadContentHandler(headerCapture);
            }
            else
            {
                handler = new NullContentHandler();
            }

            parser.parse(is, handler, metadata, context);

            // First up, copy all the Tika metadata over
            // This allows people to map any of the Tika
            // keys onto their own content model
            for(String tikaKey : metadata.names())
            {
                putRawValue(tikaKey, metadata.get(tikaKey), rawProperties);
            }

            // Now, map the common Tika metadata keys onto
            // the common Alfresco metadata keys. This allows
            // existing mapping properties files to continue
            // to work without needing any changes

            // The simple ones
            putRawValue(KEY_AUTHOR, metadata.get(Metadata.AUTHOR), rawProperties);
            putRawValue(KEY_TITLE, metadata.get(Metadata.TITLE), rawProperties);
            putRawValue(KEY_COMMENTS, metadata.get(Metadata.COMMENTS), rawProperties);

            // Get the subject and description, despite things not
            // being nearly as consistent as one might hope
            String subject = metadata.get(Metadata.SUBJECT);
            String description = metadata.get(Metadata.DESCRIPTION);
            if(subject != null && description != null)
            {
                putRawValue(KEY_DESCRIPTION, description, rawProperties);
                putRawValue(KEY_SUBJECT, subject, rawProperties);
            }
            else if(subject != null)
            {
                putRawValue(KEY_DESCRIPTION, subject, rawProperties);
                putRawValue(KEY_SUBJECT, subject, rawProperties);
            }
            else if(description != null)
            {
                putRawValue(KEY_DESCRIPTION, description, rawProperties);
                putRawValue(KEY_SUBJECT, description, rawProperties);
            }

            // Try for the dates two different ways too
            if(metadata.get(Metadata.CREATION_DATE) != null)
            {
                putRawValue(KEY_CREATED, metadata.get(Metadata.CREATION_DATE), rawProperties);
            }
            else if(metadata.get(Metadata.DATE) != null)
            {
                putRawValue(KEY_CREATED, metadata.get(Metadata.DATE), rawProperties);
            }

            // If people created a specific instance
            // (eg OfficeMetadataExtractor), then allow that
            // instance to map the Tika keys onto its
            // existing namespace so that older properties
            // files continue to map correctly
            rawProperties = extractSpecific(metadata, rawProperties, headers);
        }
        finally
        {
            if (is != null)
            {
                try { is.close(); } catch (IOException e) {}
            }
        }

        return rawProperties;
    }
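
    // Note on the raw keys above: they are what the extractor's mapping properties
    // file refers to. For example, a mapping line like "author=cm:author" (with the
    // cm namespace prefix declared in the same file) would push the raw "author"
    // value onto the content model; the mapping mechanics live in
    // AbstractMappingMetadataExtracter. The example line is illustrative only.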

    @Override
    protected void embedInternal(Map<String, Serializable> properties, ContentReader reader, ContentWriter writer) throws Throwable
    {
        Embedder embedder = getEmbedder();
        if (embedder == null)
        {
            return;
        }
        OutputStream outputStream = null;
        try
        {
            Metadata metadataToEmbed = new Metadata();
            for (String metadataKey : properties.keySet())
            {
                Serializable value = properties.get(metadataKey);
                if (value == null)
                {
                    continue;
                }
                if (value instanceof Collection<?>)
                {
                    for (Object singleValue : (Collection<?>) value)
                    {
                        try
                        {
                            // Convert to a string value for Tika
                            metadataToEmbed.add(metadataKey, DefaultTypeConverter.INSTANCE.convert(String.class, singleValue));
                        }
                        catch (TypeConversionException e)
                        {
                            logger.info("Could not convert " + metadataKey + ": " + e.getMessage());
                        }
                    }
                }
                else
                {
                    try
                    {
                        // Convert to a string value for Tika
                        metadataToEmbed.add(metadataKey, DefaultTypeConverter.INSTANCE.convert(String.class, value));
                    }
                    catch (TypeConversionException e)
                    {
                        logger.info("Could not convert " + metadataKey + ": " + e.getMessage());
                    }
                }
            }
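            // At this point metadataToEmbed holds string values keyed by the incoming
            // property names, e.g. {"author"="Nick Burch", "title"="My Doc"} (values
            // illustrative only); the Embedder writes them into a copy of the content.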
            InputStream inputStream = getInputStream(reader);
            outputStream = writer.getContentOutputStream();
            embedder.embed(metadataToEmbed, inputStream, outputStream, null);
        }
        catch (Exception e)
        {
            logger.error(e.getMessage(), e);
        }
        finally
        {
            if (outputStream != null)
            {
                try { outputStream.close(); } catch (Throwable e) {}
            }
        }
    }

    /**
     * This content handler will capture entries from within
     * the header of the Tika content XHTML, but ignore the
     * rest.
     */
    protected static class HeadContentHandler extends ContentHandlerDecorator
    {
        /**
         * XHTML XPath parser.
         */
        private static final XPathParser PARSER =
            new XPathParser("xhtml", XHTMLContentHandler.XHTML);

        /**
         * The XPath matcher used to select the XHTML body contents.
         */
        private static final Matcher MATCHER =
            PARSER.parse("/xhtml:html/xhtml:head/descendant:node()");

        /**
         * Creates a content handler that passes all XHTML body events to the
         * given underlying content handler.
         *
         * @param handler content handler
         */
        protected HeadContentHandler(ContentHandler handler)
        {
            super(new MatchingContentHandler(handler, MATCHER));
        }
    }
    /**
     * This content handler will grab all tags and attributes,
     * and record the textual content of the last seen one
     * of them.
     * Normally only used with {@link HeadContentHandler}
     */
    protected static class MapCaptureContentHandler implements ContentHandler
    {
        protected Map<String,String> tags =
            new HashMap<String, String>();
        private StringBuffer text;

        public void characters(char[] ch, int start, int len)
        {
            if(text != null)
            {
                text.append(ch, start, len);
            }
        }
        public void endElement(String namespace, String localname, String qname)
        {
            if(text != null && text.length() > 0)
            {
                tags.put(qname, text.toString());
            }
            text = null;
        }
        public void startElement(String namespace, String localname, String qname, Attributes attrs)
        {
            for(int i=0; i<attrs.getLength(); i++)
            {
                tags.put(attrs.getQName(i), attrs.getValue(i));
            }
            text = new StringBuffer();
        }

        public void endDocument() throws SAXException {}
        public void endPrefixMapping(String paramString) throws SAXException {}
        public void ignorableWhitespace(char[] paramArrayOfChar, int paramInt1,
                int paramInt2) throws SAXException {}
        public void processingInstruction(String paramString1, String paramString2)
                throws SAXException {}
        public void setDocumentLocator(Locator paramLocator) {}
        public void skippedEntity(String paramString) throws SAXException {}
        public void startDocument() throws SAXException {}
        public void startPrefixMapping(String paramString1, String paramString2)
                throws SAXException {}
    }
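
    // Illustrative example of what the handler above captures: for XHTML head content
    // such as <meta name="author" content="Nick"/> followed by <title>My Doc</title>,
    // tags ends up containing {"name"="author", "content"="Nick", "title"="My Doc"}.
    // (The sample values are made up for illustration.)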
    /**
     * A content handler that ignores all the content it finds.
     * Normally used when we only want the metadata, and don't
     * care about the file contents.
     */
    protected static class NullContentHandler implements ContentHandler
    {
        public void characters(char[] paramArrayOfChar, int paramInt1,
                int paramInt2) throws SAXException {}
        public void endDocument() throws SAXException {}
        public void endElement(String paramString1, String paramString2,
                String paramString3) throws SAXException {}
        public void endPrefixMapping(String paramString) throws SAXException {}
        public void ignorableWhitespace(char[] paramArrayOfChar, int paramInt1,
                int paramInt2) throws SAXException {}
        public void processingInstruction(String paramString1, String paramString2)
                throws SAXException {}
        public void setDocumentLocator(Locator paramLocator) {}
        public void skippedEntity(String paramString) throws SAXException {}
        public void startDocument() throws SAXException {}
        public void startElement(String paramString1, String paramString2,
                String paramString3, Attributes paramAttributes)
                throws SAXException {}
        public void startPrefixMapping(String paramString1, String paramString2)
                throws SAXException {}
    }
}