diff --git a/warehouse/core/src/main/java/datawave/data/hash/SnowflakeUIDBuilder.java b/warehouse/core/src/main/java/datawave/data/hash/SnowflakeUIDBuilder.java index 5d823f9caba..8baacddbb76 100644 --- a/warehouse/core/src/main/java/datawave/data/hash/SnowflakeUIDBuilder.java +++ b/warehouse/core/src/main/java/datawave/data/hash/SnowflakeUIDBuilder.java @@ -9,7 +9,8 @@ import java.util.Map; import org.apache.commons.cli.Option; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Builds a sequence of SnowflakeUIDs for a particular "machine" instance, which is based on a unique combination of host, process, and process thread. @@ -18,7 +19,7 @@ public class SnowflakeUIDBuilder extends AbstractUIDBuilder { private static final BigInteger UNDEFINED_MACHINE_ID = BigInteger.valueOf(-1); private static final BigInteger UNDEFINED_SNOWFLAKE = BigInteger.valueOf(-1); - private static final Logger LOGGER = Logger.getLogger(SnowflakeUIDBuilder.class); + private static final Logger LOGGER = LoggerFactory.getLogger(SnowflakeUIDBuilder.class); private final BigInteger mid; @@ -259,10 +260,7 @@ protected static int newMachineId(final Map options) { try { hostId = Integer.parseInt(option.getValue()); } catch (final Exception e) { - if (LOGGER.isDebugEnabled()) { - final String message = "Invalid " + HOST_INDEX_OPT + ": " + option; - LOGGER.warn(message, e); - } + LOGGER.warn("Invalid {}: {}", HOST_INDEX_OPT, option, e); } } @@ -271,10 +269,7 @@ protected static int newMachineId(final Map options) { try { processId = Integer.parseInt(option.getValue()); } catch (final Exception e) { - if (LOGGER.isDebugEnabled()) { - final String message = "Invalid " + PROCESS_INDEX_OPT + ": " + option; - LOGGER.warn(message, e); - } + LOGGER.warn("Invalid {}: {}", PROCESS_INDEX_OPT, option, e); } } @@ -283,20 +278,14 @@ protected static int newMachineId(final Map options) { try { threadId = Integer.parseInt(option.getValue()); } catch (final Exception e) { - if (LOGGER.isDebugEnabled()) { - final String message = "Invalid " + THREAD_INDEX_OPT + ": " + option; - LOGGER.warn(message, e); - } + LOGGER.warn("Invalid {}: {}", THREAD_INDEX_OPT, option, e); } } try { machineId = validateMachineIds(hostId, processId, threadId).intValue(); } catch (Exception e) { - if (LOGGER.isDebugEnabled()) { - final String message = "Unable to generate Snowflake machine ID"; - LOGGER.warn(message, e); - } + LOGGER.warn("Unable to generate Snowflake machine ID", e); } return machineId; @@ -387,8 +376,8 @@ private long validateTimestamp(long timestamp) { } if (timestamp <= this.previousTid) { - LOGGER.warn("Current tid is less than the previous. This could cause uid collisions.\n" + "Mid: " + mid + ", Timestamp: " + timestamp - + ", Previous: " + previousTid + ", System Time: " + System.currentTimeMillis()); + LOGGER.warn("Current tid is less than the previous. 
This could cause uid collisions.\nMid: {}, Timestamp: {}, Previous: {}, System Time: {}", mid, + timestamp, previousTid, System.currentTimeMillis()); timestamp = this.previousTid + 1; } @@ -404,12 +393,9 @@ private void storeTimestamp() { if (ZkSnowflakeCache.isInitialized()) { try { ZkSnowflakeCache.store(mid, this.previousTid); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Caching ZK ts: " + this.previousTid + ", mid: " + this.mid); - } - + LOGGER.debug("Caching ZK ts: {}, mid: {}", this.previousTid, this.mid); } catch (Exception e) { - LOGGER.error("Unable to store snowflake id from zookeeper for " + mid, e); + LOGGER.error("Unable to store snowflake id from zookeeper for {}", mid, e); throw new RuntimeException(e); } } @@ -421,14 +407,10 @@ private long initializeTimestamp() { if (ZkSnowflakeCache.isInitialized()) { try { lastCachedTid = ZkSnowflakeCache.getLastCachedTid(this.mid); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Getting ZK ts: " + lastCachedTid + " mid: " + this.mid); - } - + LOGGER.debug("Getting ZK ts: {}, mid: {}", lastCachedTid, this.mid); } catch (Exception e) { - LOGGER.error("Unable to retrieve snowflake id from zookeeper for " + mid, e); + LOGGER.error("Unable to retrieve snowflake id from zookeeper for {}", mid, e); throw new RuntimeException(e); - } } if (lastCachedTid > 0) { diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/TableCreator.java b/warehouse/ingest-core/src/main/java/datawave/ingest/TableCreator.java index 131b994feac..0316b69fd00 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/TableCreator.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/TableCreator.java @@ -1,7 +1,8 @@ package datawave.ingest; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import datawave.ingest.mapreduce.job.TableConfigurationUtil; @@ -9,7 +10,7 @@ public class TableCreator { private static final Configuration config = new Configuration(); - private static final Logger log = Logger.getLogger(TableCreator.class); + private static final Logger log = LoggerFactory.getLogger(TableCreator.class); public static void main(String[] args) { Configuration conf = OptionsParser.parseArguments(args, config); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/config/BaseHdfsFileCacheUtil.java b/warehouse/ingest-core/src/main/java/datawave/ingest/config/BaseHdfsFileCacheUtil.java index facc250aa68..d41c067e487 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/config/BaseHdfsFileCacheUtil.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/config/BaseHdfsFileCacheUtil.java @@ -9,7 +9,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import datawave.ingest.data.config.ingest.AccumuloHelper; @@ -23,7 +24,7 @@ public abstract class BaseHdfsFileCacheUtil { private static final int MAX_RETRIES = 3; protected short cacheReplicas = 3; - private static final Logger log = Logger.getLogger(BaseHdfsFileCacheUtil.class); + private static final Logger log = LoggerFactory.getLogger(BaseHdfsFileCacheUtil.class); public BaseHdfsFileCacheUtil(Configuration conf) { Validate.notNull(conf, "Configuration object passed in null"); @@ -48,7 +49,7 @@ public void read() throws IOException { while (retry && attempts <= MAX_RETRIES) { attempts++; - log.info("Reading cache at " + 
this.cacheFilePath); + log.info("Reading cache at {}", this.cacheFilePath); try (BufferedReader in = new BufferedReader(new InputStreamReader(FileSystem.get(this.cacheFilePath.toUri(), conf).open(this.cacheFilePath)))) { readCache(in); retry = false; @@ -80,7 +81,7 @@ public void update() { cleanup(fs, tempFile); } - log.error("Unable to update cache file " + cacheFilePath + ". " + e.getMessage(), e); + log.error("Unable to update cache file {}. {}", cacheFilePath, e.getMessage(), e); } } @@ -99,10 +100,10 @@ public void createCacheFile(FileSystem fs, Path tmpCacheFile) { throw new IOException("Failed to rename temporary cache file"); } } catch (Exception e) { - log.warn("Unable to rename " + tmpCacheFile + " to " + this.cacheFilePath + "probably because somebody else replaced it ", e); + log.warn("Unable to rename {} to {} probably because somebody else replaced it", tmpCacheFile, this.cacheFilePath, e); cleanup(fs, tmpCacheFile); } - log.info("Updated " + cacheFilePath); + log.info("Updated {}", cacheFilePath); } @@ -110,7 +111,7 @@ protected void cleanup(FileSystem fs, Path tmpCacheFile) { try { fs.delete(tmpCacheFile, false); } catch (Exception e) { - log.error("Unable to clean up " + tmpCacheFile, e); + log.error("Unable to clean up {}", tmpCacheFile, e); } } @@ -132,7 +133,7 @@ public Path createTempFile(FileSystem fs) throws IOException { do { Path parentDirectory = this.cacheFilePath.getParent(); String fileName = this.cacheFilePath.getName() + "." + count; - log.info("Attempting to create " + fileName + "under " + parentDirectory); + log.info("Attempting to create {} under {}", fileName, parentDirectory); tmpCacheFile = new Path(parentDirectory, fileName); count++; } while (!fs.createNewFile(tmpCacheFile)); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/config/TableConfigCache.java b/warehouse/ingest-core/src/main/java/datawave/ingest/config/TableConfigCache.java index 730dda45e18..632f7a39985 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/config/TableConfigCache.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/config/TableConfigCache.java @@ -12,7 +12,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TableConfigCache extends BaseHdfsFileCacheUtil { @@ -27,7 +28,7 @@ public class TableConfigCache extends BaseHdfsFileCacheUtil { private static final Object lock = new Object(); - protected static final Logger log = Logger.getLogger("datawave.ingest"); + protected static final Logger log = LoggerFactory.getLogger("datawave.ingest"); private TableConfigCache(Configuration conf) { super(conf); @@ -58,7 +59,7 @@ public boolean isInitialized() { public void writeCacheFile(FileSystem fs, Path tmpCacheFile) throws IOException { Map> tempValidationMap = configMap; - log.info("Writing to temp file " + tmpCacheFile.getName()); + log.info("Writing to temp file {}", tmpCacheFile.getName()); try (PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(tmpCacheFile)), false, "UTF-8")) { for (Map.Entry> table : configMap.entrySet()) { for (Map.Entry tableProp : table.getValue().entrySet()) { @@ -66,16 +67,16 @@ public void writeCacheFile(FileSystem fs, Path tmpCacheFile) throws IOException } } } catch (IOException e) { - log.error("Unable to write cache file " + tmpCacheFile, e); + log.error("Unable to write cache file {}", tmpCacheFile, e); throw e; } // 
validate temp file - log.info("Validating file: " + tmpCacheFile.getName()); + log.info("Validating file: {}", tmpCacheFile.getName()); try (BufferedReader in = new BufferedReader(new InputStreamReader(FileSystem.get(tmpCacheFile.toUri(), conf).open(tmpCacheFile)))) { readCache(in); } catch (IOException ex) { - log.error("Error reading cache temp file: " + tmpCacheFile, ex); + log.error("Error reading cache temp file: {}", tmpCacheFile, ex); throw ex; } @@ -138,7 +139,7 @@ public Map getTableProperties(String tableName) throws IOExceptio read(); } if (null == this.configMap.get(tableName) || this.configMap.get(tableName).isEmpty()) { - log.error("No accumulo config cache for " + tableName + ". Please generate the accumulo config cache after ensuring the table exists."); + log.error("No accumulo config cache for {}. Please generate the accumulo config cache after ensuring the table exists.", tableName); } return this.configMap.get(tableName); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/config/TableConfigCacheGenerator.java b/warehouse/ingest-core/src/main/java/datawave/ingest/config/TableConfigCacheGenerator.java index 21a29dd3146..26adb56f70b 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/config/TableConfigCacheGenerator.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/config/TableConfigCacheGenerator.java @@ -1,7 +1,8 @@ package datawave.ingest.config; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import datawave.ingest.OptionsParser; import datawave.ingest.mapreduce.job.TableConfigurationUtil; @@ -9,7 +10,7 @@ public class TableConfigCacheGenerator { protected static final Configuration config = new Configuration(); - protected static final Logger log = Logger.getLogger(TableConfigCache.class); + protected static final Logger log = LoggerFactory.getLogger(TableConfigCache.class); public static void main(String[] args) { @@ -19,7 +20,7 @@ public static void main(String[] args) { TableConfigurationUtil tcu = new TableConfigurationUtil(conf); tcu.updateCacheFile(); } catch (Exception e) { - log.error("Unable to generate accumulo config cache " + e.getMessage()); + log.error("Unable to generate accumulo config cache {}", e.getMessage()); } } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/TypeRegistry.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/TypeRegistry.java index 98537561123..e95eb7357b4 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/TypeRegistry.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/TypeRegistry.java @@ -12,12 +12,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.RecordReader; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; -import datawave.core.common.logging.ThreadConfigurableLogger; import datawave.ingest.data.config.ConfigurationHelper; import datawave.ingest.data.config.DataTypeOverrideHelper; import datawave.ingest.data.config.filter.KeyValueFilter; @@ -28,7 +28,7 @@ public class TypeRegistry extends HashMap { - private static final Logger log = ThreadConfigurableLogger.getLogger(TypeRegistry.class); + private static final Logger log = LoggerFactory.getLogger(TypeRegistry.class); public static final String ALL_PREFIX = "all"; @@ -73,7 +73,7 @@ public static TypeRegistry 
getInstance(Configuration config) { } /** - * Helps determine whether or not the registry instance has been instantiated. + * Helps determine whether the registry instance has been instantiated. * * @return true if the registry exists, false otherwise */ @@ -189,13 +189,13 @@ private TypeRegistry(Configuration config) { try { helperClassName = ConfigurationHelper.isNull(config, typeName + INGEST_HELPER, String.class); } catch (IllegalArgumentException e) { - log.debug("No helper class defined for type: " + typeName); + log.debug("No helper class defined for type: {}", typeName); } String readerClassName = null; try { readerClassName = ConfigurationHelper.isNull(config, typeName + RAW_READER, String.class); } catch (IllegalArgumentException e) { - log.debug("No reader class defined for type: " + typeName); + log.debug("No reader class defined for type: {}", typeName); } String[] handlerClassNames = null; try { @@ -207,7 +207,7 @@ private TypeRegistry(Configuration config) { .asList(StringUtils.trimAndRemoveEmptyStrings(ConfigurationHelper.isNull(config, EXCLUDED_HANDLER_CLASSES, String[].class))); handlerClassNames = getClassnamesWithoutExclusions(handlerClassNames, exclusions); } catch (IllegalArgumentException e) { - log.debug("No handler classes defined for type: " + typeName); + log.debug("No handler classes defined for type: {}", typeName); } String[] filterClassNames = null; @@ -217,7 +217,7 @@ private TypeRegistry(Configuration config) { StringUtils.trimAndRemoveEmptyStrings(ConfigurationHelper.isNull(config, typeName + FILTER_CLASSES, String[].class))); filterPriority = config.getInt(typeName + FILTER_PRIORITY, Integer.MAX_VALUE); } catch (IllegalArgumentException e) { - log.debug("No filter classes defined for type: " + typeName); + log.debug("No filter classes defined for type: {}", typeName); } String outputName = config.get(typeName + OUTPUT_NAME, typeName); @@ -236,27 +236,27 @@ private TypeRegistry(Configuration config) { // performing `configurationKey.split(".")[0]`. Using a period inside datatype name muddies later code // due to the manner than Hadoop Configurations operate. if (typeName.indexOf('.') != -1) { - log.error("Datatypes ('" + INGEST_DATA_TYPES + "') cannot contain a period. Offending datatype: '" + typeName + "'"); + log.error("Datatypes ({}) cannot contain a period. Offending datatype: {}", INGEST_DATA_TYPES, typeName); throw new IllegalArgumentException( "Datatypes ('" + INGEST_DATA_TYPES + "') cannot contain a period. 
Offending datatype: '" + typeName + "'"); } Type t = new Type(typeName, outputName, helperClass, readerClass, handlerClassNames, filterPriority, filterClassNames); - log.debug("Registered type " + t); + log.debug("Registered type {}", t); this.put(typeName, t); if (null != config.get(typeName + DataTypeOverrideHelper.Properties.DATA_TYPE_VALUES)) { for (String type : config.getStrings(typeName + DataTypeOverrideHelper.Properties.DATA_TYPE_VALUES)) { outputName = config.get(type + OUTPUT_NAME, outputName); t = new Type(type, outputName, helperClass, readerClass, handlerClassNames, filterPriority, filterClassNames); - log.debug("Registered child type:" + type); + log.debug("Registered child type: {}", type); this.put(type, t); } } } } catch (ClassNotFoundException cnfe) { - log.error("Unable to create supporting class for type " + typeName, cnfe); + log.error("Unable to create supporting class for type {}", typeName, cnfe); } } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/XMLFieldConfigHelper.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/XMLFieldConfigHelper.java index 6d09758cc4e..3670b6ee23e 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/XMLFieldConfigHelper.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/XMLFieldConfigHelper.java @@ -14,8 +14,9 @@ import javax.xml.parsers.SAXParser; import javax.xml.parsers.SAXParserFactory; -import org.apache.log4j.Logger; import org.apache.xerces.jaxp.SAXParserFactoryImpl; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.xml.sax.Attributes; import org.xml.sax.SAXException; import org.xml.sax.helpers.DefaultHandler; @@ -28,7 +29,7 @@ /** Helper class to read XML based Field Configurations */ public final class XMLFieldConfigHelper implements FieldConfigHelper { - private static final Logger log = Logger.getLogger(XMLFieldConfigHelper.class); + private static final Logger log = LoggerFactory.getLogger(XMLFieldConfigHelper.class); /** be explicit and use Apache Xerces-J here instead of relying on java to plug in the proper parser */ private static final SAXParserFactory parserFactory = SAXParserFactoryImpl.newInstance(); @@ -69,7 +70,7 @@ public static XMLFieldConfigHelper load(String fieldConfigFile, BaseIngestHelper try (InputStream in = getAsStream(fieldConfigFile)) { if (in != null) { - log.info("Loading field configuration from configuration file: " + fieldConfigFile); + log.info("Loading field configuration from configuration file: {}", fieldConfigFile); return new XMLFieldConfigHelper(in, baseIngestHelper); } else { throw new IllegalArgumentException("Field config file '" + fieldConfigFile + "' not found!"); @@ -92,7 +93,7 @@ private static InputStream getAsStream(String fieldConfigPath) { try { return uri.toURL().openStream(); } catch (IOException e) { - log.error("Could not open config location: " + fieldConfigPath, e); + log.error("Could not open config location: {}", fieldConfigPath, e); return null; } } @@ -110,7 +111,7 @@ public XMLFieldConfigHelper(InputStream in, BaseIngestHelper helper) throws Pars SAXParser parser = parserFactory.newSAXParser(); parser.parse(in, handler); - log.info("Loaded FieldConfigHelper: " + this); + log.info("Loaded FieldConfigHelper: {}", this); } public boolean addKnownField(String fieldName, FieldInfo info) { @@ -440,7 +441,7 @@ void startField(String uri, String localName, String qName, Attributes attribute if (this.ingestHelper != null) { 
this.ingestHelper.updateDatawaveTypes(name, fieldType); } else if (fieldType.equals(this.defaultFieldType)) { - log.warn("No BaseIngestHelper set, ignoring type information for " + name + " in configuration file"); + log.warn("No BaseIngestHelper set, ignoring type information for {} in configuration file", name); } } } @@ -496,7 +497,7 @@ void startFieldPattern(String uri, String localName, String qName, Attributes at if (this.ingestHelper != null) { this.ingestHelper.updateDatawaveTypes(pattern, fieldType); } else if (!fieldType.equals(this.defaultFieldType)) { - log.warn("No BaseIngestHelper set, ignoring type information for " + pattern + " in configuration file"); + log.warn("No BaseIngestHelper set, ignoring type information for {} in configuration file", pattern); } } } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/AbstractIngestHelper.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/AbstractIngestHelper.java index de17da681c5..6bffa329d08 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/AbstractIngestHelper.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/AbstractIngestHelper.java @@ -5,7 +5,8 @@ import java.util.Set; import java.util.TreeSet; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import datawave.data.normalizer.NormalizationException; import datawave.data.type.Type; @@ -17,7 +18,7 @@ * fields values from the datatypes that they represent. */ public abstract class AbstractIngestHelper extends DataTypeHelperImpl implements IngestHelperInterface { - private static final Logger log = Logger.getLogger(AbstractIngestHelper.class); + private static final Logger log = LoggerFactory.getLogger(AbstractIngestHelper.class); protected boolean deleteMode = false; protected boolean replaceMalformedUTF8 = false; @@ -64,7 +65,9 @@ public String getNormalizedMaskedValue(final String key) { final Set normalizedValues = normalizeFieldValue(fieldName.toUpperCase(), value); return normalizedValues.iterator().next(); } catch (final Exception ex) { - log.warn(this.getType().typeName() + ": Unable to normalize masked value of '" + value + "' for " + fieldName, ex); + if (log.isWarnEnabled()) { + log.warn("{}: Unable to normalize masked value of {} for {}", this.getType().typeName(), value, fieldName, ex); + } return value; } } @@ -145,7 +148,7 @@ public void upperCaseSetEntries(Set input, String warnMessage) { if (!s.toUpperCase().equals(s)) { removeList.add(s); addList.add(s.toUpperCase()); - log.warn(" has a value " + s + "that was converted to uppercase."); + log.warn(" has a value {} that was converted to uppercase.", s); } } input.removeAll(removeList); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/BaseIngestHelper.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/BaseIngestHelper.java index c3d28d3a2d8..bffded63810 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/BaseIngestHelper.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/BaseIngestHelper.java @@ -15,7 +15,8 @@ import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.base.Splitter; import com.google.common.collect.HashMultimap; @@ -139,7 +140,7 @@ public abstract class BaseIngestHelper 
extends AbstractIngestHelper implements C public static final String FIELD_CONFIG_FILE = ".data.category.field.config.file"; - private static final Logger log = ThreadConfigurableLogger.getLogger(BaseIngestHelper.class); + private static final Logger log = LoggerFactory.getLogger(BaseIngestHelper.class); private Multimap> typeFieldMap = null; private Multimap> typePatternMap = null; @@ -256,7 +257,7 @@ public void setup(Configuration config) { final String fieldConfigFile = config.get(this.getType().typeName() + FIELD_CONFIG_FILE); if (fieldConfigFile != null) { if (log.isDebugEnabled()) { - log.debug("Field config file " + fieldConfigFile + " specified for: " + this.getType().typeName() + FIELD_CONFIG_FILE); + log.debug("Field config file {} specified for: {}", fieldConfigFile, this.getType().typeName() + FIELD_CONFIG_FILE); } this.fieldConfigHelper = XMLFieldConfigHelper.load(fieldConfigFile, this); } @@ -264,7 +265,7 @@ public void setup(Configuration config) { // Process the indexed fields if (config.get(this.getType().typeName() + DISALLOWLIST_INDEX_FIELDS) != null) { if (log.isDebugEnabled()) { - log.debug("Disallowlist specified for: " + this.getType().typeName() + DISALLOWLIST_INDEX_FIELDS); + log.debug("Disallowlist specified for: {}", this.getType().typeName() + DISALLOWLIST_INDEX_FIELDS); } super.setHasIndexDisallowlist(true); configProperty = DISALLOWLIST_INDEX_FIELDS; @@ -275,10 +276,10 @@ public void setup(Configuration config) { } // Load the proper list of fields to (not) index - if (fieldConfigHelper != null) { - log.info("Using field config helper for " + this.getType().typeName()); - } else if (configProperty == null) { - log.warn("No index fields or disallowlist fields specified, not generating index fields for " + this.getType().typeName()); + if (fieldConfigHelper != null) { + log.info("Using field config helper for {}", this.getType().typeName()); + } else if (configProperty == null) { + log.warn("No index fields or disallowlist fields specified, not generating index fields for {}", this.getType().typeName()); } else { this.indexedFields = Sets.newHashSet(); Collection indexedStrings = config.getStringCollection(this.getType().typeName() + configProperty); @@ -288,7 +289,9 @@ public void setup(Configuration config) { } this.moveToPatternMap(this.indexedFields, this.indexedPatterns); } else { - log.warn(this.getType().typeName() + configProperty + " not specified."); + if (log.isWarnEnabled()) { + log.warn("{} not specified.", this.getType().typeName() + configProperty); + } } } @@ -306,7 +309,7 @@ public void setup(Configuration config) { // Process the reverse index fields if (config.get(this.getType().typeName() + DISALLOWLIST_REVERSE_INDEX_FIELDS) != null) { if (log.isDebugEnabled()) { - log.debug("Disallowlist specified for: " + this.getType().typeName() + DISALLOWLIST_REVERSE_INDEX_FIELDS); + log.debug("Disallowlist specified for: {}", this.getType().typeName() + DISALLOWLIST_REVERSE_INDEX_FIELDS); } this.setHasReverseIndexDisallowlist(true); @@ -314,16 +317,16 @@ public void setup(Configuration config) { configProperty = DISALLOWLIST_REVERSE_INDEX_FIELDS; } else if (config.get(this.getType().typeName() + REVERSE_INDEX_FIELDS) != null) { if (log.isDebugEnabled()) { - log.debug("Reverse Index specified.for: " + this.getType().typeName() + REVERSE_INDEX_FIELDS); + log.debug("Reverse Index specified for: {}", this.getType().typeName() + REVERSE_INDEX_FIELDS); } this.setHasReverseIndexDisallowlist(false); configProperty = REVERSE_INDEX_FIELDS; } // Load the proper list of fields to (not) reverse index - if (configProperty == null) { - log.warn("No reverse index fields or disallowlist reverse index fields specified, not generating reverse index fields for " - + this.getType().typeName()); + if (configProperty == null) { + log.warn("No reverse index fields or disallowlist reverse index fields specified, not generating reverse index fields for {}", + this.getType().typeName()); } else { reverseIndexedFields = Sets.newHashSet(); Collection reverseIndexedStrings = config.getStringCollection(this.getType().typeName() + configProperty); @@ -333,7 +336,9 @@ public void setup(Configuration config) { } this.moveToPatternMap(this.reverseIndexedFields, this.reverseIndexedPatterns); } else { - log.warn(this.getType().typeName() + configProperty + " not specified"); + if (log.isWarnEnabled()) { + log.warn("{} not specified", this.getType().typeName() + configProperty); + } } } @@ -389,7 +394,7 @@ public void setup(Configuration config) { try { policy = FailurePolicy.valueOf(property.getValue()); } catch (Exception e) { - log.error("Unable to parse field normalization failure policy: " + property.getValue(), e); + log.error("Unable to parse field normalization failure policy: {}", property.getValue(), e); throw new IllegalArgumentException("Unable to parse field normalization failure policy: " + property.getValue(), e); } if (fieldName.indexOf('*') >= 0) { @@ -521,7 +526,7 @@ public static String getFieldName(Type dataType, String property, String propert // if this type already has a '.', then we have a malformed property // name if (dataType.typeName().indexOf('.') >= 0) { - log.error(propertyPattern + " property malformed: " + property); + log.error("{} property malformed: {}", propertyPattern, property); throw new IllegalArgumentException(propertyPattern + " property malformed: " + property); } @@ -544,7 +549,7 @@ public static String getFieldType(Type dataType, String property, String propert // if this type already has a '.', then we have a malformed property // name if (dataType.typeName().indexOf('.') >= 0) { - log.error(propertyPattern + " property malformed: " + property); + log.error("{} property malformed: {}", propertyPattern, property); throw new IllegalArgumentException(propertyPattern + " property malformed: " + property); } @@ -668,8 +673,8 @@ public static Matcher getBestMatch(Set patterns, String fieldName) { } if (patternMatcher.reset(fieldName).matches()) { if (bestMatch != null) { - log.warn("Multiple regular expression patterns with the same length exist for matching field " + fieldName - + ". The pattern that sorts lexicographically last will be used. Please verify your configurations."); + log.warn("Multiple regular expression patterns with the same length exist for matching field {}. " + + "The pattern that sorts lexicographically last will be used. 
Please verify your configurations.", fieldName); break; } else { bestMatch = patternMatcher; @@ -746,9 +751,7 @@ public HashSet normalizeFieldValue(String fieldName, value.setEventFieldValue(null); } values.add(value); - if (log.isDebugEnabled()) { - log.debug("added normalized field " + value + " to values set."); - } + log.debug("added normalized field {} to values set.", value); } return values; } @@ -774,20 +777,13 @@ protected NormalizedContentInterface normalizeFieldValue(NormalizedContentInterf */ protected Set normalize(NormalizedContentInterface normalizedContent) { String eventFieldName = normalizedContent.getEventFieldName(); - if (log.isDebugEnabled()) { - log.debug("event field name is " + eventFieldName + " in " + normalizedContent); - } + log.debug("event field name is {} in {}", eventFieldName, normalizedContent); String indexedFieldName = normalizedContent.getIndexedFieldName(); - if (log.isDebugEnabled()) { - log.debug("indexed field name is " + indexedFieldName + " in " + normalizedContent); - } + log.debug("indexed field name is {} in {}", indexedFieldName, normalizedContent); // if it is indexed, set the index part, if (this.isIndexedField(eventFieldName) || this.isIndexedField(indexedFieldName)) { - if (log.isDebugEnabled()) { - log.debug("eventFieldName=" + eventFieldName + ", indexedFieldName =" + indexedFieldName + " we have an indexed field here " - + normalizedContent); - } + log.debug("eventFieldName={}, indexedFieldName={} we have an indexed field here {}", eventFieldName, indexedFieldName, normalizedContent); Collection> dataTypes = getDataTypes(normalizedContent.getIndexedFieldName()); HashSet values = new HashSet<>(dataTypes.size()); for (datawave.data.type.Type dataType : dataTypes) { @@ -796,40 +792,29 @@ protected Set normalize(NormalizedContentInterface n } else { values.add(normalize(normalizedContent, dataType)); } - if (log.isDebugEnabled()) { - log.debug("added normalized field " + normalizedContent + " to values " + values); - } + log.debug("added normalized field {} to values {}", normalizedContent, values); } return values; } // if it is normalized, set the field value part and the (unused) // indexed field value part if (this.isNormalizedField(eventFieldName) || this.isNormalizedField(indexedFieldName)) { - if (log.isDebugEnabled()) { - log.debug("eventFieldName=" + eventFieldName + ", indexedFieldName =" + indexedFieldName + " we have a normalized field here " - + normalizedContent); - } + log.debug("eventFieldName={}, indexedFieldName={} we have a normalized field here {}", eventFieldName, indexedFieldName, normalizedContent); Collection> dataTypes = getDataTypes(normalizedContent.getIndexedFieldName()); HashSet values = new HashSet<>(dataTypes.size()); for (datawave.data.type.Type dataType : dataTypes) { values.add(normalizeFieldValue(normalizedContent, dataType)); - if (log.isDebugEnabled()) { - log.debug("added normalized field " + normalizedContent + " to values " + values); - } + log.debug("added normalized field {} to values {}", normalizedContent, values); } return values; } else { // gets the default normalizer, if present - if (log.isDebugEnabled()) { - log.debug("not a normalized field: " + indexedFieldName + " nor " + eventFieldName); - } + log.debug("not a normalized field: {} nor {}", indexedFieldName, eventFieldName); Collection> dataTypes = getDataTypes(normalizedContent.getIndexedFieldName()); HashSet values = new HashSet<>(dataTypes.size()); for (datawave.data.type.Type dataType : dataTypes) { 
values.add(normalize(normalizedContent, dataType)); - if (log.isDebugEnabled()) { - log.debug("added normalized field " + normalizedContent + " to values " + values); - } + log.debug("added normalized field {} to values {}", normalizedContent, values); } return values; } @@ -934,8 +919,9 @@ public Multimap normalize(Multimap e : fields.entries()) { if (e.getValue() != null) { applyNormalizationAndAddToResults(results, new NormalizedFieldAndValue(e.getKey(), e.getValue())); - } else - log.warn(this.getType().typeName() + " has key " + e.getKey() + " with a null value."); + } else if (log.isWarnEnabled()) { + log.warn("{} has key {} with a null value.", this.getType().typeName(), e.getKey()); + } } return results; } @@ -952,8 +938,9 @@ public Multimap normalizeMap(Multimap e : fields.entries()) { if (e.getValue() != null) { applyNormalizationAndAddToResults(results, e.getValue()); - } else - log.warn(this.getType().typeName() + " has key " + e.getKey() + " with a null value."); + } else if (log.isWarnEnabled()) { + log.warn("{} has key {} with a null value.", this.getType().typeName(), e.getKey()); + } } return results; } @@ -1020,7 +1007,7 @@ protected Set normalizeAndAlias(NormalizedContentInt ns = normalize(aliaser.normalizeAndAlias(nArg)); } catch (Exception e) { if (log.isTraceEnabled()) { - log.trace(this.getType().typeName() + ": Field failed normalization: " + nArg, e); + log.trace("{}: Field failed normalization: {}", this.getType().typeName(), nArg, e); } nArg.setError(e); return Collections.singleton(nArg); @@ -1120,10 +1107,12 @@ public boolean verify() { // first verify the index fields if (this.indexedFields == null) { retVal = false; - log.error(this.getType().typeName() + ": index set has been set to null."); + if (log.isErrorEnabled()) { + log.error("{}: index set has been set to null.", this.getType().typeName()); + } } else if (this.indexedFields.isEmpty()) { if (log.isDebugEnabled()) { - log.debug(this.getType().typeName() + ": no fields have been set to index."); + log.debug("{}: no fields have been set to index.", this.getType().typeName()); } } else { upperCaseSetEntries(this.indexedFields, this.getType().typeName() + ": index fields"); @@ -1131,10 +1120,12 @@ public boolean verify() { // next verify the index fields if (this.reverseIndexedFields == null) { retVal = false; - log.error(this.getType().typeName() + ": reverse index set has been set to null."); + if (log.isErrorEnabled()) { + log.error("{}: reverse index set has been set to null.", this.getType().typeName()); + } } else if (this.reverseIndexedFields.isEmpty()) { if (log.isDebugEnabled()) { - log.debug(this.getType().typeName() + ": no fields have been set to reverse index."); + log.debug("{}: no fields have been set to reverse index.", this.getType().typeName()); } } else { upperCaseSetEntries(this.reverseIndexedFields, this.getType().typeName() + ": reverse index fields"); @@ -1208,9 +1199,7 @@ public Multimap getVirtualFields(Multimap= 0 || fieldName.indexOf('+') >= 0) { // We need a more conclusive test for regex typePatternMap.put(fieldName, datawaveType); @@ -1228,7 +1215,7 @@ public void updateDatawaveTypes(String fieldName, String typeClasses) { typeFieldMap.put(fieldName, datawaveType); } if (log.isDebugEnabled()) { - log.debug("Registered a " + typeClass + " for type[" + this.getType().typeName() + "], field[" + fieldName + "]"); + log.debug("Registered a {} for type[{}], field[{}]", typeClass, this.getType().typeName(), fieldName); } } } diff --git 
a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/CSVIngestHelper.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/CSVIngestHelper.java index 3bbf5bbbfae..3edbe64e951 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/CSVIngestHelper.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/CSVIngestHelper.java @@ -5,7 +5,8 @@ import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.lang.text.StrTokenizer; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; @@ -17,7 +18,7 @@ public class CSVIngestHelper extends ContentBaseIngestHelper { - private static final Logger log = Logger.getLogger(CSVIngestHelper.class); + private static final Logger log = LoggerFactory.getLogger(CSVIngestHelper.class); protected CSVHelper helper = null; @Override @@ -156,12 +157,12 @@ protected void processExtraField(Multimap fields, String fieldVal } } } else { - log.error("Unable to process the following as a name=value pair: " + fieldValue); + log.error("Unable to process the following as a name=value pair: {}", fieldValue); } } /** - * Process a field. This will split multi-valued fields as necessary and call processField on each part. + * Process a field. This will split multivalued fields as necessary and call processField on each part. * * @param fields * list of fields @@ -173,7 +174,7 @@ protected void processExtraField(Multimap fields, String fieldVal protected void processPreSplitField(Multimap fields, String fieldName, String fieldValue) { if (fieldValue != null) { if (helper.isMultiValuedField(fieldName)) { - // Value can be multiple parts, need to break on semi-colon + // Value can be multiple parts, need to break on semicolon String singleFieldName = helper.usingMultiValuedFieldsDisallowlist() ? 
fieldName : helper.getMultiValuedFields().get(fieldName); int limit = helper.getMultiFieldSizeThreshold(); int count = 0; diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/CompositeIngest.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/CompositeIngest.java index a1f7f437cc1..a5fef654c62 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/CompositeIngest.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/CompositeIngest.java @@ -16,7 +16,8 @@ import org.apache.commons.lang.NotImplementedException; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.base.Objects; import com.google.common.collect.HashMultimap; @@ -78,7 +79,7 @@ static boolean isOverloadedCompositeField(Collection compFields, String class CompositeFieldNormalizer { private static final long serialVersionUID = -3892470989028896718L; - private static final Logger log = Logger.getLogger(CompositeFieldNormalizer.class); + private static final Logger log = LoggerFactory.getLogger(CompositeFieldNormalizer.class); private static final String DEFAULT_SEPARATOR = new String(Character.toChars(Character.MAX_CODE_POINT)); @@ -149,7 +150,7 @@ public void setup(Type type, Configuration config) { // if any members are indexOnly fields, skip this one if (!Sets.intersection(Sets.newHashSet(componentFields), indexOnly).isEmpty()) { - log.warn("rejecting " + compositeField + " which includes index only field in " + indexOnly); + log.warn("rejecting {} which includes index only field in {}", compositeField, indexOnly); continue; } @@ -187,7 +188,7 @@ public void setup(Type type, Configuration config) { Set emptySet = Collections.emptySet(); ignoreNormalizationForFields = (null != ignoreNormalization) ? 
cleanSet(ignoreNormalization) : emptySet; - log.debug("setup with composites " + this.compositeToFieldMap); + log.debug("setup with composites {}", this.compositeToFieldMap); } /** diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/ContentBaseIngestHelper.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/ContentBaseIngestHelper.java index c85b66638ae..466b76ff859 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/ContentBaseIngestHelper.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/ContentBaseIngestHelper.java @@ -5,7 +5,8 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; @@ -16,7 +17,7 @@ */ public abstract class ContentBaseIngestHelper extends AbstractContentIngestHelper { - private static final Logger log = Logger.getLogger(ContentBaseIngestHelper.class); + private static final Logger log = LoggerFactory.getLogger(ContentBaseIngestHelper.class); private final Set contentIndexAllowlist = new HashSet<>(); private final Set contentReverseIndexAllowlist = new HashSet<>(); @@ -107,8 +108,8 @@ public void setup(Configuration config) throws IllegalArgumentException { : rawDocumentViewName; if (log.isTraceEnabled()) { log.trace("saveRawDataOption was true"); - log.trace("getType().typeName()+RAW_DOCUMENT_VIEW_NAME: " + getType().typeName() + RAW_DOCUMENT_VIEW_NAME); - log.trace("config.get(getType().typeName()+RAW_DOCUMENT_VIEW_NAME): " + config.get(getType().typeName() + RAW_DOCUMENT_VIEW_NAME)); + log.trace("getType().typeName()+RAW_DOCUMENT_VIEW_NAME: {}", getType().typeName() + RAW_DOCUMENT_VIEW_NAME); + log.trace("config.get(getType().typeName()+RAW_DOCUMENT_VIEW_NAME): {}", config.get(getType().typeName() + RAW_DOCUMENT_VIEW_NAME)); } } } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/EventFieldNormalizerHelper.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/EventFieldNormalizerHelper.java index df9fc378b63..b3c96118be4 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/EventFieldNormalizerHelper.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/EventFieldNormalizerHelper.java @@ -5,7 +5,8 @@ import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Maps; @@ -25,7 +26,7 @@ * * public void setup(Configuration config) { ... eventFieldNormalizerHelper = new EventFieldNormalizerHelper(config); ... 
} * - * then override the normalize(NormalizedContentInterface) as follows: + * then override the method normalize(NormalizedContentInterface) as follows: * * public NormalizedContentInterface normalize(NormalizedContentInterface nci) { * @@ -46,7 +47,7 @@ public class EventFieldNormalizerHelper { private Map> typeCompiledPatternMap = null; private static final Type NO_OP_TYPE = new NoOpType(); - private static final Logger log = Logger.getLogger(EventFieldNormalizerHelper.class); + private static final Logger log = LoggerFactory.getLogger(EventFieldNormalizerHelper.class); /** * @@ -85,7 +86,9 @@ public EventFieldNormalizerHelper(Configuration config) { } else { typeFieldMap.put(fieldName, normalizer); } - log.debug("Registered a " + normalizerClass + " for type[" + this.getType().typeName() + "], EVENT (not index) field[" + fieldName + "]"); + if (log.isDebugEnabled()) { + log.debug("Registered a {} for type [{}], EVENT (not index) field[{}]", normalizerClass, this.getType().typeName(), fieldName); + } } } } @@ -151,7 +154,7 @@ protected String getFieldName(String property, String propertyPattern) { if (fieldName.indexOf('.') >= 0) { // if this type already has a '.', then we have a malformed property name if (this.getType().typeName().indexOf('.') >= 0) { - log.error(propertyPattern + " property malformed: " + property); + log.error("{} property malformed: {}", propertyPattern, property); throw new IllegalArgumentException(propertyPattern + " property malformed: " + property); } fieldName = null; diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/IngestFieldFilter.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/IngestFieldFilter.java index 06f637925cd..c555f973aac 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/IngestFieldFilter.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/config/ingest/IngestFieldFilter.java @@ -10,7 +10,8 @@ import org.apache.commons.collections4.keyvalue.AbstractMapEntry; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Lists; import com.google.common.collect.Multimap; @@ -46,7 +47,7 @@ */ public class IngestFieldFilter { - private static final Logger logger = Logger.getLogger(IngestFieldFilter.class); + private static final Logger logger = LoggerFactory.getLogger(IngestFieldFilter.class); @Deprecated public static final String FILTER_FIELD_SUFFIX = ".data.field.filter"; @@ -75,10 +76,10 @@ public void setup(Configuration conf) { fieldNameFilters = new FieldConfiguration(); fieldNameFilters.load(conf.get(dataType.typeName() + FILTER_FIELD_SUFFIX), false); fieldNameFilters.load(conf.get(dataType.typeName() + FILTER_FIELD_NAME_SUFFIX), false); - logger.info("Field Name Filters for " + dataType.typeName() + ": " + fieldNameFilters); + logger.info("Field Name Filters for {}: {}", dataType.typeName(), fieldNameFilters); fieldValueFilters = new FieldConfiguration(conf.get(dataType.typeName() + FILTER_FIELD_VALUE_SUFFIX), true); - logger.info("Field Value Filters for " + dataType.typeName() + ": " + fieldValueFilters); + logger.info("Field Value Filters for {}: {}", dataType.typeName(), fieldValueFilters); } /** @@ -92,7 +93,7 @@ public void apply(Multimap fields) { for (FieldFilter filter : fieldNameFilters) { if (fields.keySet().containsAll(filter.getKeepFields())) { if 
(logger.isTraceEnabled()) { - logger.trace("Removing " + filter.getDropFields() + " because " + filter.getKeepFields() + " exists in event"); + logger.trace("Removing {} because {} exists in event", filter.getDropFields(), filter.getKeepFields()); } fields.keySet().removeAll(filter.getDropFields()); } @@ -103,9 +104,7 @@ public void apply(Multimap fields) { for (List keepValues : gatherValueLists(fields, filter.getKeepFields(), -1, null)) { for (List toRemoveValues : gatherValueLists(fields, filter.getDropFields(), -1, null)) { if (equalValues(keepValues, toRemoveValues)) { - if (logger.isTraceEnabled()) { - logger.trace("Removing " + toRemoveValues + " because " + keepValues + " exists in event"); - } + logger.trace("Removing {} because {} exists in event", toRemoveValues, keepValues); for (FieldValue toRemoveValue : toRemoveValues) { fields.remove(toRemoveValue.getKey(), toRemoveValue.getValue()); } @@ -266,7 +265,7 @@ public String getGroup() { } /** - * Determine if the raw value in this matches the raw value in another + * Determine if the raw value in this, matches the raw value in another * * @param other * the field value to check diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/normalizer/AbstractNormalizer.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/normalizer/AbstractNormalizer.java index 186055207d7..317ba39e12a 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/normalizer/AbstractNormalizer.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/normalizer/AbstractNormalizer.java @@ -6,7 +6,8 @@ import java.util.Map.Entry; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; @@ -17,7 +18,7 @@ import datawave.ingest.data.config.NormalizedFieldAndValue; public abstract class AbstractNormalizer implements TextNormalizer { - private static final Logger log = Logger.getLogger(AbstractNormalizer.class); + private static final Logger log = LoggerFactory.getLogger(AbstractNormalizer.class); @Override public void setup(Type type, String instance, Configuration config) {} @@ -33,7 +34,7 @@ public void setup(Type type, String instance, Configuration config) {} * configuration to use * @param normalizerClass * the normalizerClass to set up - * @return An configured instance of the normalizerClass + * @return A configured instance of the normalizerClass */ public static TextNormalizer createNormalizer(Type type, String instance, Configuration config, String normalizerClass) { Class c; @@ -50,7 +51,7 @@ public static TextNormalizer createNormalizer(Type type, String instance, Config throw new IllegalArgumentException("Error creating instance of class " + normalizerClass + ':' + e.getLocalizedMessage(), e); } if (o instanceof TextNormalizer) { - // setup the normalizer + // set up the normalizer ((TextNormalizer) o).setup(type, instance, config); } else { throw new IllegalArgumentException(normalizerClass + " is not an instance of " + TextNormalizer.class.getName()); @@ -223,7 +224,9 @@ public NormalizedContentInterface normalize(NormalizedContentInterface field) { if (field.getEventFieldName().equals("IP_GEO_FM_COORDINATES") && field.getEventFieldValue().equals("-99.999/-999.999")) { log.warn("Found know bad default value: IP_GEO_FM_COORDINATES=-99.999/-999.999"); } else { - log.error("Failed to normalize " + field.getEventFieldName() + '=' + 
field.getEventFieldValue(), e); + if (log.isErrorEnabled()) { + log.error("Failed to normalize {}={}", field.getEventFieldName(), field.getEventFieldValue(), e); + } } n.setError(e); } @@ -240,7 +243,9 @@ public Multimap normalize(Multimap extractFieldNameComponents(Mu try { revisedField = extractFieldNameComponents(field); } catch (Exception e) { - log.error("Failed to extract field name components: " + field.getIndexedFieldName() + '=' + field.getIndexedFieldValue(), e); + if (log.isErrorEnabled()) { + log.error("Failed to extract field name components: {}={}", field.getIndexedFieldName(), field.getIndexedFieldValue(), e); + } revisedField.setError(e); } results.put(revisedField.getIndexedFieldName(), revisedField); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/data/tokenize/TokenizationHelper.java b/warehouse/ingest-core/src/main/java/datawave/ingest/data/tokenize/TokenizationHelper.java index 931b2efa40e..1a5f2fa0fad 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/data/tokenize/TokenizationHelper.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/data/tokenize/TokenizationHelper.java @@ -3,16 +3,17 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import datawave.ingest.data.config.DataTypeHelper; import datawave.util.ObjectFactory; public class TokenizationHelper { - private static final Logger log = Logger.getLogger(TokenizationHelper.class); + private static final Logger log = LoggerFactory.getLogger(TokenizationHelper.class); /** * Used to track tokenization execution time. It's too expensive to perform a call to System.currentTimeMillis() each time we produce a new token, so spawn @@ -21,7 +22,7 @@ public class TokenizationHelper { * The main thread will check the counter value each time it produces a new token and thus track the number of ticks that have elapsed. */ public static class HeartBeatThread extends Thread { - private static final Logger log = Logger.getLogger(HeartBeatThread.class); + private static final Logger log = LoggerFactory.getLogger(HeartBeatThread.class); public static final long INTERVAL = 500; // half second resolution public static volatile int counter = 0; @@ -45,11 +46,11 @@ public void run() { } // verify that we're exeuting in a timely fashion - // ..if not warn. + // if not warn. 
long currentRun = System.currentTimeMillis(); long delta = currentRun - lastRun; if (delta > (INTERVAL * 1.5)) { - log.warn("HeartBeatThread starved for cpu, " + "should execute every " + INTERVAL + " ms, " + "latest: " + delta + " ms."); + log.warn("HeartBeatThread starved for cpu, should execute every {}ms, latest: {}ms.", INTERVAL, delta); } lastRun = currentRun; counter++; diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/input/reader/AbstractEventRecordReader.java b/warehouse/ingest-core/src/main/java/datawave/ingest/input/reader/AbstractEventRecordReader.java index 20083dae89e..cb35e5deb91 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/input/reader/AbstractEventRecordReader.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/input/reader/AbstractEventRecordReader.java @@ -18,7 +18,8 @@ import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.lib.input.FileSplit; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Sets; @@ -41,7 +42,7 @@ public abstract class AbstractEventRecordReader extends RecordReader uidOverrideFields = new TreeMap<>(); @@ -193,7 +194,7 @@ protected void setDefaultSecurityMarkings(RawRecordContainer event) { * Ability to override the UID value. This is useful for datatypes where we want the UID to be based off the configured id field's value instead of the * entire record, so that the csv records and bud file content are merged into one event in the shard table. For the enrichment data, we want to base the * UID off of the MD5 hash and some other metadata, but not the dates in the record. This is because we will have to reload the enrichment data on a regular - * basis and we want the same hashes to merge. + * basis, and we want the same hashes to merge. 
* * @param event * the event container to examine @@ -238,12 +239,12 @@ protected void extractEventDate(final String fieldName, final String fieldValue) try { event.setDate(format.parse(DateNormalizer.convertMicroseconds(fieldValue, format.toPattern())).getTime()); if (logger.isDebugEnabled()) { - logger.debug("Parsed date from '" + fieldName + "' using formatter " + format.toPattern()); + logger.debug("Parsed date from {} using formatter {}", fieldName, format.toPattern()); } break; } catch (java.text.ParseException e) { if (logger.isTraceEnabled()) { - logger.trace("Error parsing date from hash record using format " + format.toPattern(), e); + logger.trace("Error parsing date from hash record using format {}", format.toPattern(), e); } } } @@ -252,7 +253,7 @@ protected void extractEventDate(final String fieldName, final String fieldValue) for (SimpleDateFormat formatter : formatters) { patterns.add(formatter.toPattern()); } - logger.error("Unable to parse date '" + fieldValue + "' from field '" + fieldName + " using formatters " + patterns); + logger.error("Unable to parse date {} from field {} using formatters {}", fieldValue, fieldName, patterns); } } else if (formatter != null) { try { diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardFutureIntervalPredicate.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardFutureIntervalPredicate.java index 71d6bcb7fe0..6ad5d61a79c 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardFutureIntervalPredicate.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardFutureIntervalPredicate.java @@ -1,7 +1,8 @@ package datawave.ingest.mapreduce; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import datawave.ingest.data.RawRecordContainer; import datawave.ingest.metric.IngestInput; @@ -9,11 +10,11 @@ public class DataTypeDiscardFutureIntervalPredicate implements RawRecordPredicate { - private static final Logger log = Logger.getLogger(DataTypeDiscardFutureIntervalPredicate.class); + private static final Logger log = LoggerFactory.getLogger(DataTypeDiscardFutureIntervalPredicate.class); /** - * number which will be used to evaluate whether or not an Event should be processed. If the Event.getEventDate() is less than (now + interval) then it will - * be processed. + * number which will be used to evaluate whether an Event should be processed. If the Event.getEventDate() is less than (now + interval) then it will be + * processed. */ public static final String DISCARD_FUTURE_INTERVAL = "event.discard.future.interval"; @@ -25,7 +26,7 @@ public class DataTypeDiscardFutureIntervalPredicate implements RawRecordPredicat public void setConfiguration(String type, Configuration conf) { long defaultInterval = conf.getLong(DISCARD_FUTURE_INTERVAL, 0l); this.discardFutureInterval = conf.getLong(type + "." + DISCARD_FUTURE_INTERVAL, defaultInterval); - log.info("Setting up type: " + type + " with future interval " + this.discardFutureInterval); + log.info("Setting up type: {} with future interval {}", type, this.discardFutureInterval); } @Override @@ -33,7 +34,7 @@ public boolean shouldProcess(RawRecordContainer record) { // Determine whether the event date is greater than the interval. Excluding fatal error events. 
if (discardFutureInterval != 0L && (record.getDate() > (now.get() + discardFutureInterval))) { if (log.isInfoEnabled()) - log.info("Event with time " + record.getDate() + " newer than specified interval of " + (now.get() + discardFutureInterval) + ", skipping..."); + log.info("Event with time {} newer than specified interval of {}, skipping...", record.getDate(), (now.get() + discardFutureInterval)); return false; } return true; diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardIntervalPredicate.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardIntervalPredicate.java index 69a0857cc26..da3a0df0540 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardIntervalPredicate.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/DataTypeDiscardIntervalPredicate.java @@ -1,7 +1,8 @@ package datawave.ingest.mapreduce; import org.apache.hadoop.conf.Configuration; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import datawave.ingest.data.RawRecordContainer; import datawave.ingest.metric.IngestInput; @@ -9,11 +10,11 @@ public class DataTypeDiscardIntervalPredicate implements RawRecordPredicate { - private static final Logger log = Logger.getLogger(DataTypeDiscardIntervalPredicate.class); + private static final Logger log = LoggerFactory.getLogger(DataTypeDiscardIntervalPredicate.class); /** - * number which will be used to evaluate whether or not an Event should be processed. If the Event.getEventDate() is greater than (now - interval) then it - * will be processed. + * number which will be used to evaluate whether an Event should be processed. If the Event.getEventDate() is greater than (now - interval) then it will be + * processed. */ public static final String DISCARD_INTERVAL = "event.discard.interval"; @@ -25,7 +26,7 @@ public class DataTypeDiscardIntervalPredicate implements RawRecordPredicate { public void setConfiguration(String type, Configuration conf) { long defaultInterval = conf.getLong(DISCARD_INTERVAL, 0l); this.discardInterval = conf.getLong(type + "." + DISCARD_INTERVAL, defaultInterval); - log.info("Setting up type: " + type + " with interval " + this.discardInterval); + log.info("Setting up type: {} with interval {}", type, this.discardInterval); } @Override @@ -33,7 +34,7 @@ public boolean shouldProcess(RawRecordContainer record) { // Determine whether the event date is greater than the interval. Excluding fatal error events. 
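A note on the isInfoEnabled guards this patch keeps around the skip messages: SLF4J's {} placeholders defer message formatting until the level is enabled, but the argument expressions are still evaluated at the call site, so an explicit guard remains useful when an argument is costly to compute. A small sketch under that assumption; the expensiveSummary helper is hypothetical:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative only: when an isInfoEnabled()/isDebugEnabled() guard still pays off with SLF4J.
public class GuardedLoggingExample {
    private static final Logger log = LoggerFactory.getLogger(GuardedLoggingExample.class);

    static String expensiveSummary() {          // hypothetical costly call
        return String.join(",", "a", "b", "c"); // stand-in for real work
    }

    public static void main(String[] args) {
        long eventTime = System.currentTimeMillis();
        // Cheap argument: no guard needed, formatting is deferred by SLF4J.
        log.info("Event with time {} skipped", eventTime);

        // Expensive argument: guard so expensiveSummary() only runs when INFO is enabled.
        if (log.isInfoEnabled()) {
            log.info("Event detail: {}", expensiveSummary());
        }
    }
}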
if (discardInterval != 0L && (record.getDate() < (now.get() - discardInterval))) { if (log.isInfoEnabled()) - log.info("Event with time " + record.getDate() + " older than specified interval of " + (now.get() - discardInterval) + ", skipping..."); + log.info("Event with time {} older than specified interval of {}, skipping...", record.getDate(), (now.get() - discardInterval)); return false; } return true; diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/atom/AtomDataTypeHandler.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/atom/AtomDataTypeHandler.java index f48dfa0a9ec..5ba56f69fda 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/atom/AtomDataTypeHandler.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/atom/AtomDataTypeHandler.java @@ -13,7 +13,8 @@ import org.apache.hadoop.mapreduce.StatusReporter; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.TaskInputOutputContext; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.base.Strings; import com.google.common.collect.Multimap; @@ -42,7 +43,7 @@ */ public class AtomDataTypeHandler implements ExtendedDataTypeHandler { - private static final Logger log = Logger.getLogger(AtomDataTypeHandler.class); + private static final Logger log = LoggerFactory.getLogger(AtomDataTypeHandler.class); public static final String ATOM_TYPE = "atom"; public static final String ATOM_TABLE_NAME = ATOM_TYPE + ".table.name"; diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/dateindex/DateIndexDataTypeHandler.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/dateindex/DateIndexDataTypeHandler.java index 25fa6caee29..acb8cf8b8a6 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/dateindex/DateIndexDataTypeHandler.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/dateindex/DateIndexDataTypeHandler.java @@ -20,7 +20,8 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.StatusReporter; import org.apache.hadoop.mapreduce.TaskAttemptContext; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; @@ -78,7 +79,7 @@ */ public class DateIndexDataTypeHandler implements DataTypeHandler, RawRecordMetadata { - private static final Logger log = ThreadConfigurableLogger.getLogger(DateIndexDataTypeHandler.class); + private static final Logger log = LoggerFactory.getLogger(DateIndexDataTypeHandler.class); public static final String DATEINDEX_TNAME = "date.index.table.name"; public static final String DATEINDEX_LPRIORITY = "date.index.table.loader.priority"; @@ -152,7 +153,7 @@ public void setup(TaskAttemptContext context) { String tableName = conf.get(DATEINDEX_TNAME, null); if (null == tableName) { - log.error(DATEINDEX_TNAME + " not specified, no date index will be created"); + log.error("{} not specified, no date index will be created", DATEINDEX_TNAME); } else { setDateIndexTableName(new Text(tableName)); } @@ -176,7 +177,9 @@ public void setup(TaskAttemptContext context) { } typeToFields.put(parts[0], parts[1]); } - log.info(this.getClass().getSimpleName() + " configured for " + dataType.typeName() + ": " + typeToFields); + if (log.isInfoEnabled()) { + log.info("{} configured for 
{}: {}", this.getClass().getSimpleName(), dataType.typeName(), typeToFields); + } dataTypeToTypeToFields.put(dataType.typeName(), typeToFields); } } @@ -243,7 +246,7 @@ private void getBulkIngestKeys(RawRecordContainer event, Multimap getEdgeKeyVersionDateChange() throws IOException { */ public void updateCache(FileSystem fs) throws AccumuloSecurityException, AccumuloException, IOException, TableNotFoundException { - log.info("Reading the " + metadataTableName + " for edge key version ..."); + log.info("Reading the {} for edge key version ...", metadataTableName); if (this.cbHelper == null) { this.cbHelper = new AccumuloHelper(); this.cbHelper.setup(conf); @@ -134,7 +135,7 @@ public void updateCache(FileSystem fs) throws AccumuloSecurityException, Accumul * "old" edge key from being created...that is, with EdgeKey.DATE_TYPE.OLD_EVENT (See ProtobufEdgeDataTypeHandler.writeEdges) */ Date then = new Date(0); - log.warn("Could not find any edge key version entries in the " + metadataTableName + " table. Automatically seeding with date: " + then); + log.warn("Could not find any edge key version entries in the {} table. Automatically seeding with date: {}", metadataTableName, then); String dateString = seedMetadataTable(client, then.getTime(), 1); versionDates.put(1, dateString); } @@ -165,11 +166,11 @@ public void updateCache(FileSystem fs) throws AccumuloSecurityException, Accumul throw new IOException("Failed to rename temporary splits file"); } } catch (Exception e) { - log.warn("Unable to rename " + tmpVersionFile + " to " + this.versioningCache + " probably because somebody else replaced it", e); + log.warn("Unable to rename {} to {} probably because somebody else replaced it", tmpVersionFile, this.versioningCache, e); try { fs.delete(tmpVersionFile, false); } catch (Exception e2) { - log.error("Unable to clean up " + tmpVersionFile, e2); + log.error("Unable to clean up {}", tmpVersionFile, e2); } } } catch (Exception e) { @@ -247,11 +248,11 @@ public void createMetadataEntry(long time, int keyVersionNum) throws Exception { private void ensureTableExists(AccumuloClient client) throws AccumuloSecurityException, AccumuloException { TableOperations tops = client.tableOperations(); if (!tops.exists(metadataTableName)) { - log.info("Creating table: " + metadataTableName); + log.info("Creating table: {}", metadataTableName); try { tops.create(metadataTableName); } catch (TableExistsException e) { - log.error(metadataTableName + " already exists someone got here first."); + log.error("{} already exists someone got here first.", metadataTableName); } } } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/define/EdgeDataBundle.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/define/EdgeDataBundle.java index 18c5488886e..77d25163774 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/define/EdgeDataBundle.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/edge/define/EdgeDataBundle.java @@ -7,7 +7,8 @@ import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.security.ColumnVisibility; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import datawave.edge.util.EdgeKey; import datawave.edge.util.EdgeValue.EdgeValueBuilder; @@ -28,7 +29,7 @@ */ public class EdgeDataBundle { - private static final Logger log = Logger.getLogger(EdgeDataBundle.class); + private static final Logger log = 
LoggerFactory.getLogger(EdgeDataBundle.class); // Input/Setup variables // final so you're not tempted to change them @@ -91,7 +92,7 @@ public EdgeDataBundle(EdgeDefinition edgeDef, NormalizedContentInterface ifaceSo if (event.getAltIds() != null && !event.getAltIds().isEmpty()) { this.uuid = event.getAltIds().iterator().next(); } - // even though event, etc references are saved above, passing in the event + // even though event, etc. references are saved above, passing in the event // prevents future bug this.initFieldMasking(helper, event); this.initMarkings(getSource().getMarkings(), getSink().getMarkings()); @@ -533,7 +534,7 @@ public static Value getStatsLinkValue(final String realmedIdentifier) { return (new Value(hll.getBytes())); } catch (IOException e) { - log.warn("Failed to add " + realmedIdentifier + " to HyperLogLog", e); + log.warn("Failed to add {} to HyperLogLog", realmedIdentifier, e); return (null); } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/error/ErrorDataTypeHandler.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/error/ErrorDataTypeHandler.java index 0abad1bf986..55cea907a05 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/error/ErrorDataTypeHandler.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/error/ErrorDataTypeHandler.java @@ -15,7 +15,8 @@ import org.apache.hadoop.mapreduce.StatusReporter; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.TaskInputOutputContext; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Multimap; @@ -99,7 +100,7 @@ */ public class ErrorDataTypeHandler implements ExtendedDataTypeHandler { - private static final Logger log = Logger.getLogger(ErrorDataTypeHandler.class); + private static final Logger log = LoggerFactory.getLogger(ErrorDataTypeHandler.class); public static final String ERROR_TABLE = "error.table"; public static final String ERROR_TABLE_NAME = ERROR_TABLE + ".name"; diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/error/ErrorShardedDataTypeHandler.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/error/ErrorShardedDataTypeHandler.java index e167321c8d7..1354ece7ae1 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/error/ErrorShardedDataTypeHandler.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/error/ErrorShardedDataTypeHandler.java @@ -16,7 +16,8 @@ import org.apache.hadoop.mapreduce.StatusReporter; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.TaskInputOutputContext; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; @@ -102,7 +103,7 @@ */ public class ErrorShardedDataTypeHandler extends AbstractColumnBasedHandler implements ExtendedDataTypeHandler { - private static final Logger log = Logger.getLogger(ErrorShardedDataTypeHandler.class); + private static final Logger log = LoggerFactory.getLogger(ErrorShardedDataTypeHandler.class); public static final String ERROR_PROP_PREFIX = "error."; @@ -226,7 +227,7 @@ public long process(KEYIN key, RawRecordContainer record, Multimap implements ExtendedDataTypeHandler, FacetedEstimator { - private static final Logger log = 
Logger.getLogger(FacetHandler.class); + private static final Logger log = LoggerFactory.getLogger(FacetHandler.class); /* Global configuration properties */ @@ -272,10 +273,8 @@ public long process(KEYIN key, RawRecordContainer event, Multimap= System.currentTimeMillis() @@ -211,7 +214,7 @@ public boolean isInitialized() { public void updateCache() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, IOException { FileSystem fs = this.numShardsCachePath.getFileSystem(this.conf); String metadataTableName = ConfigurationHelper.isNull(this.conf, ShardedDataTypeHandler.METADATA_TABLE_NAME, String.class); - log.info("Reading the " + metadataTableName + " for multiple numshards configuration"); + log.info("Reading the {} for multiple numshards configuration", metadataTableName); if (this.aHelper == null) { this.aHelper = new AccumuloHelper(); @@ -255,7 +258,7 @@ public void updateCache() throws AccumuloException, AccumuloSecurityException, T // now move the temporary file to the file cache try { fs.delete(this.numShardsCachePath, false); - // Note this rename will fail if the file already exists (i.e. the delete failed or somebody just replaced it) + // Note this rename will fail if the file already exists (i.e. the delete behavior failed or somebody just replaced it) // but this is OK... if (!fs.rename(tmpShardCacheFile, this.numShardsCachePath)) { throw new IOException("Failed to rename temporary multiple numshards cache file"); @@ -263,11 +266,11 @@ public void updateCache() throws AccumuloException, AccumuloSecurityException, T isCacheLoaded = true; } catch (Exception e) { - log.warn("Unable to rename " + tmpShardCacheFile + " to " + this.numShardsCachePath + " probably because somebody else replaced it", e); + log.warn("Unable to rename {} to {} probably because somebody else replaced it", tmpShardCacheFile, this.numShardsCachePath, e); try { fs.delete(tmpShardCacheFile, false); } catch (Exception e2) { - log.error("Unable to clean up " + tmpShardCacheFile, e2); + log.error("Unable to clean up {}", tmpShardCacheFile, e2); } } } @@ -280,11 +283,11 @@ public void updateCache() throws AccumuloException, AccumuloSecurityException, T private void ensureTableExists(AccumuloClient client, String metadataTableName) throws AccumuloException, AccumuloSecurityException { TableOperations tops = client.tableOperations(); if (!tops.exists(metadataTableName)) { - log.info("Creating table: " + metadataTableName); + log.info("Creating table: {}", metadataTableName); try { tops.create(metadataTableName); } catch (TableExistsException tee) { - log.error(metadataTableName + " already exists someone got here first"); + log.error("{} already exists someone got here first", metadataTableName); } } } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/shard/ShardedDataTypeHandler.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/shard/ShardedDataTypeHandler.java index 4073a14a0f4..a88d7b23649 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/shard/ShardedDataTypeHandler.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/shard/ShardedDataTypeHandler.java @@ -16,7 +16,8 @@ import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.StatusReporter; import org.apache.hadoop.mapreduce.TaskAttemptContext; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.base.Stopwatch; import 
com.google.common.cache.CacheBuilder; @@ -121,7 +122,7 @@ */ public abstract class ShardedDataTypeHandler extends StatsDEnabledDataTypeHandler implements DataTypeHandler { - private static final Logger log = ThreadConfigurableLogger.getLogger(ShardedDataTypeHandler.class); + private static final Logger log = LoggerFactory.getLogger(ShardedDataTypeHandler.class); public static final String NUM_SHARDS = ShardIdFactory.NUM_SHARDS; public static final String SHARD_TNAME = "shard.table.name"; @@ -227,13 +228,13 @@ public void setup(TaskAttemptContext context) { String tableName = conf.get(SHARD_TNAME, null); if (null == tableName) - log.error(SHARD_TNAME + " not specified, no events will be created, and the global index will be useless"); + log.error("{} not specified, no events will be created, and the global index will be useless", SHARD_TNAME); else setShardTableName(new Text(tableName)); tableName = conf.get(SHARD_STATS_TNAME, null); if (null == tableName) - log.warn(SHARD_STATS_TNAME + " not specified, no global index mutations will be created."); + log.warn("{} not specified, no global index mutations will be created.", SHARD_STATS_TNAME); else { setIndexStatsTableName(new Text(tableName)); setProduceStats(true); @@ -241,25 +242,25 @@ public void setup(TaskAttemptContext context) { tableName = conf.get(SHARD_GIDX_TNAME, null); if (null == tableName) - log.warn(SHARD_GIDX_TNAME + " not specified, no global index mutations will be created."); + log.warn("{} not specified, no global index mutations will be created.", SHARD_GIDX_TNAME); else setShardIndexTableName(new Text(tableName)); tableName = conf.get(SHARD_GRIDX_TNAME, null); if (null == tableName) - log.warn(SHARD_GRIDX_TNAME + " not specified, no global reverse index mutations will be created."); + log.warn("{} not specified, no global reverse index mutations will be created.", SHARD_GRIDX_TNAME); else setShardReverseIndexTableName(new Text(tableName)); tableName = conf.get(METADATA_TABLE_NAME, null); if (null == tableName) - log.warn(METADATA_TABLE_NAME + " not specified, no metadata will be created, I hope nothing requires normalizers."); + log.warn("{} not specified, no metadata will be created, I hope nothing requires normalizers.", METADATA_TABLE_NAME); else setMetadataTableName(new Text(tableName)); tableName = (LoadDateTableConfigHelper.isLoadDatesEnabled(conf) ? 
LoadDateTableConfigHelper.getLoadDatesTableName(conf) : null); if (null == tableName) - log.warn(LoadDateTableConfigHelper.LOAD_DATES_TABLE_NAME_PROP + " not specified, no load dates will be created"); + log.warn("{} not specified, no load dates will be created", LoadDateTableConfigHelper.LOAD_DATES_TABLE_NAME_PROP); else setLoadDatesTableName(new Text(tableName)); @@ -270,7 +271,7 @@ public void setup(TaskAttemptContext context) { tableName = conf.get(SHARD_DINDX_NAME, null); if (null == tableName) { - log.warn(SHARD_DINDX_NAME + " not specified, no term dictionary will be created."); + log.warn("{} not specified, no term dictionary will be created.", SHARD_DINDX_NAME); } else { setShardDictionaryIndexTableName(new Text(tableName)); this.setupDictionaryCache(conf.getInt(SHARD_DICTIONARY_CACHE_ENTRIES, SHARD_DINDEX_CACHE_DEFAULT_SIZE)); @@ -299,21 +300,17 @@ public void setup(TaskAttemptContext context) { private void setupToReindexIfEnabled(Configuration conf) { this.isReindexEnabled = conf.getBoolean(IS_REINDEX_ENABLED, false); - log.info("isReindexEnabled: " + this.isReindexEnabled); + log.info("isReindexEnabled: {}", this.isReindexEnabled); if (this.isReindexEnabled) { String commaSeparatedFieldNames = conf.get(FIELDS_TO_REINDEX); - if (log.isDebugEnabled()) { - log.debug("configured reindex fields: " + commaSeparatedFieldNames); - } + log.debug("configured reindex fields: {}", commaSeparatedFieldNames); if (null != commaSeparatedFieldNames) { this.requestedFieldsForReindex = Arrays.asList(commaSeparatedFieldNames.split(",")); } if (null == this.requestedFieldsForReindex || this.requestedFieldsForReindex.isEmpty()) { throw new RuntimeException("Missing or empty " + FIELDS_TO_REINDEX + " from configuration: " + conf); } - if (log.isDebugEnabled()) { - log.debug("list of fields to reindex: " + requestedFieldsForReindex); - } + log.debug("list of fields to reindex: {}", requestedFieldsForReindex); } } @@ -469,7 +466,7 @@ protected Multimap createColumns(RawRecordContainer event, NormalizedContentInterface value = e.getValue(); byte[] visibility = getVisibility(event, value); if (log.isTraceEnabled()) { - log.trace("Is " + e.getKey() + " indexed? " + hasIndexTerm(e.getKey()) + " " + helper.isIndexedField(e.getKey())); + log.trace("Is {} indexed? {} {}", e.getKey(), hasIndexTerm(e.getKey()), helper.isIndexedField(e.getKey())); } values.putAll(createForwardIndices(helper, event, fields, value, visibility, maskedVisibility, maskedFieldHelper, shardId, indexedValue, @@ -714,9 +711,7 @@ protected Multimap createTermIndexColumn(RawRecordContainer Multimap values = ArrayListMultimap.create(); - if (log.isTraceEnabled()) { - log.trace("Create index column " + tableName); - } + log.trace("Create index column {}", tableName); if (null == tableName) { return values; } @@ -738,9 +733,7 @@ protected Multimap createTermIndexColumn(RawRecordContainer if (!StringUtils.isEmpty(normalizedMaskedValue)) { if (direction == Direction.REVERSE) { normalizedMaskedValue = new StringBuilder(normalizedMaskedValue).reverse().toString(); - if (log.isTraceEnabled()) { - log.trace("normalizedMaskedValue is reversed to: " + normalizedMaskedValue); - } + log.trace("normalizedMaskedValue is reversed to: {}", normalizedMaskedValue); } // Create a key for the masked field value with the masked visibility. 
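For context on the reversal traced just above: the reverse-index branch writes the normalized value with its characters reversed, presumably so that prefix scans over the reversed value can stand in for leading-wildcard lookups. A trivial sketch of the transform with an illustrative value:

// Illustrative only: the reversal applied before writing a reverse-index entry.
public class ReverseIndexValueExample {
    public static void main(String[] args) {
        String normalizedValue = "www.example.com"; // illustrative value
        String reversed = new StringBuilder(normalizedValue).reverse().toString();
        System.out.println(reversed); // prints "moc.elpmaxe.www"
    }
}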
Key k = this.createIndexKey(normalizedMaskedValue.getBytes(), colf, colq, maskedVisibility, event.getTimestamp(), false); @@ -981,8 +974,7 @@ protected Multimap createShardEventColumn(RawRecordContaine // Else create one key for the field with the original value and the masked visiblity Key cbKey = createKey(shardId, colf, unmaskedColq, refVisibility, event.getTimestamp(), deleteMode); BulkIngestKey bKey = new BulkIngestKey(this.getShardTableName(), cbKey); - if (log.isTraceEnabled()) - log.trace("Creating bulk ingest Key " + bKey); + log.trace("Creating bulk ingest Key {}", bKey); values.put(bKey, NULL_VALUE); } @@ -1033,8 +1025,7 @@ public void createShardFieldIndexColumn(RawRecordContainer event, Multimap createShardFieldIndexColumn(RawRecordContainer event, String fieldName, String fieldValue, byte[] visibility, byte[] maskedVisibility, MaskedFieldHelper maskedFieldHelper, byte[] shardId, Value value) { - if (log.isTraceEnabled()) - log.trace("Field value is " + fieldValue); + log.trace("Field value is {}", fieldValue); // hold on to the helper IngestHelperInterface helper = this.getHelper(event.getDataType()); diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/summary/CoreSummaryDataTypeHandler.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/summary/CoreSummaryDataTypeHandler.java index 94894ba67b5..e7ddb6d7214 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/summary/CoreSummaryDataTypeHandler.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/summary/CoreSummaryDataTypeHandler.java @@ -5,7 +5,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.StatusReporter; import org.apache.hadoop.mapreduce.TaskAttemptContext; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Multimap; @@ -20,7 +21,7 @@ import datawave.ingest.metadata.RawRecordMetadata; public abstract class CoreSummaryDataTypeHandler implements DataTypeHandler { - private static final Logger log = ThreadConfigurableLogger.getLogger(CoreSummaryDataTypeHandler.class); + private static final Logger log = LoggerFactory.getLogger(CoreSummaryDataTypeHandler.class); private Configuration mConf = null; diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/summary/MetricsSummaryDataTypeHandler.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/summary/MetricsSummaryDataTypeHandler.java index b0d5433abc5..a0e3870a4c2 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/summary/MetricsSummaryDataTypeHandler.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/summary/MetricsSummaryDataTypeHandler.java @@ -14,7 +14,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.TaskAttemptContext; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.base.Preconditions; import com.google.common.collect.ArrayListMultimap; @@ -39,7 +40,7 @@ */ public class MetricsSummaryDataTypeHandler extends SummaryDataTypeHandler { - private static final Logger log = ThreadConfigurableLogger.getLogger(MetricsSummaryDataTypeHandler.class); + private static final Logger log = LoggerFactory.getLogger(MetricsSummaryDataTypeHandler.class); // configuration keys public static final String 
METRICS_SUMMARY_PROP_PREFIX = "metrics-"; @@ -142,7 +143,7 @@ public void setMetricsSummaryFormatter(MetricsSummaryFormatter metricsSummaryFor public void setTableName(Configuration conf) { String tableName = conf.get(METRICS_SUMMARY_TABLE_NAME); if (tableName == null) { - log.warn(METRICS_SUMMARY_TABLE_NAME + " not specified, no summary data will be created."); + log.warn("{} not specified, no summary data will be created.", METRICS_SUMMARY_TABLE_NAME); } else { this.metricsSummaryTableName = new Text(tableName); } @@ -199,7 +200,7 @@ public Multimap createEntries(RawRecordContainer record, Mu Set colQs = Sets.newHashSet(metricsSummaryFormatter.getSummaryValuesRegex(colQualFieldsRegexList, fields)); if (log.isTraceEnabled()) { - log.trace("Creating Keys for...rowIds.size() [" + rowIds.size() + "] colFs.size() [" + colFs.size() + "] colQs.size() [" + colQs.size() + "]"); + log.trace("Creating Keys for...rowIds.size() [{}] colFs.size() [{}] colQs.size() [{}]", rowIds.size(), colFs.size(), colQs.size()); } ColumnVisibility vis = new ColumnVisibility(origVis.flatten()); @@ -220,7 +221,7 @@ public Multimap createEntries(RawRecordContainer record, Mu } if (log.isTraceEnabled()) { - log.trace("Created [" + values.size() + "] keys for ingest"); + log.trace("Created [{}] keys for ingest", values.size()); } return values; diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/tokenize/ContentIndexingColumnBasedHandler.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/tokenize/ContentIndexingColumnBasedHandler.java index 34f83102685..231289a5919 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/tokenize/ContentIndexingColumnBasedHandler.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/tokenize/ContentIndexingColumnBasedHandler.java @@ -16,12 +16,15 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.StatusReporter; import org.apache.hadoop.mapreduce.TaskAttemptContext; -import org.apache.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TypeAttribute; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.Marker; +import org.slf4j.MarkerFactory; import com.google.common.base.Preconditions; import com.google.common.collect.HashMultimap; @@ -62,7 +65,9 @@ */ public abstract class ContentIndexingColumnBasedHandler extends AbstractColumnBasedHandler implements TermFrequencyIngestHelperInterface { - private static final Logger log = Logger.getLogger(ContentIndexingColumnBasedHandler.class); + private static final Logger log = LoggerFactory.getLogger(ContentIndexingColumnBasedHandler.class); + + private static Marker fatal = MarkerFactory.getMarker("FATAL"); public abstract AbstractContentIngestHelper getContentIndexingDataTypeHelper(); @@ -192,7 +197,7 @@ protected void flushTokenOffsetCache(RawRecordContainer event, Multimap tokenHelper.getTokenizerTimeWarnThresholdMsec() && !tokenizerTimeWarned) { long realDelta = System.currentTimeMillis() - start; counters.incrementValue(ContentIndexCounters.TOKENIZER_TIME_WARNINGS, 1, reporter); - log.warn("Tokenization of field " + modifiedFieldName + " has exceeded warning threshold " - + tokenHelper.getTokenizerTimeWarnThresholdMsec() + "ms (" + realDelta + 
"ms)"); + log.warn("Tokenization of field {} has exceeded warning threshold {}ms ({}ms)", modifiedFieldName, + tokenHelper.getTokenizerTimeErrorThresholdMsec(), realDelta); tokenizerTimeWarned = true; } @@ -423,30 +428,26 @@ protected void tokenizeField(final Analyzer a, final NormalizedContentInterface // Make sure the term length is greater than the minimum allowed length int tlen = token.length(); if (tlen < tokenHelper.getTermLengthMinimum()) { - log.debug("Ignoring token of length " + token.length() + " because it is too short"); + log.debug("Ignoring token of length {} because it is too short", token.length()); counters.increment(ContentIndexCounters.TOO_SHORT_COUNTER, reporter); continue; } // skip the term if it is over the length limit unless it is a FILE, URL or HTTP_REQUEST if (tlen > tokenHelper.getTermLengthLimit() && (!(type.equals("FILE") || type.equals("URL") || type.equals("HTTP_REQUEST")))) { - if (log.isDebugEnabled()) { - log.debug("Ignoring " + type + " token due to excessive length"); - } + log.debug("Ignoring {} token due to excessive length", type); counters.increment(ContentIndexCounters.EXCESSIVE_LENGTH_COUNTER, reporter); continue; } if (tlen > tokenHelper.getTermLengthWarningLimit()) { - log.warn("Encountered long term: " + tlen + " characters, '" + token + "'"); + log.warn("Encountered long term: {} characters, {}", tlen, token); counters.increment(ContentIndexCounters.LENGTH_WARNING_COUNTER, reporter); } if (truncAtt.isTruncated()) { - if (log.isDebugEnabled()) { - log.debug("Encountered truncated term: " + tlen + " characters, '" + token + "'"); - } + log.debug("Encountered truncated term: {} characters, {}", tlen, token); counters.increment(ContentIndexCounters.TRUNCATION_COUNTER, reporter); } @@ -674,7 +675,7 @@ protected BloomFilterUtil newBloomFilterUtil(final Configuration configuration) final String message = "Unable to create factory for N-grams. 
ContentIngestHelperInterface is null."; ; - Logger.getLogger(BloomFilterUtil.class).warn(message, new IllegalStateException()); + LoggerFactory.getLogger(BloomFilterUtil.class).warn(message, new IllegalStateException()); } return util; diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/tokenize/ExtendedContentIndexingColumnBasedHandler.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/tokenize/ExtendedContentIndexingColumnBasedHandler.java index 0f7f29d61fb..4eb50f48933 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/tokenize/ExtendedContentIndexingColumnBasedHandler.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/handler/tokenize/ExtendedContentIndexingColumnBasedHandler.java @@ -30,9 +30,10 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.TaskInputOutputContext; import org.apache.hadoop.util.bloom.BloomFilter; -import org.apache.log4j.Logger; import org.apache.lucene.analysis.CharArraySet; import org.infinispan.commons.util.Base64; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Multimap; @@ -123,7 +124,7 @@ public abstract class ExtendedContentIndexingColumnBasedHandler (INTERVAL * 1.5)) { - log.warn("HeartBeatThread starved for cpu, " + "should execute every " + INTERVAL + " ms, " + "latest: " + delta + " ms."); + log.warn("HeartBeatThread starved for cpu, should execute every {}ms, latest: {}ms.", INTERVAL, delta); } lastRun = currentRun; counter++; @@ -693,7 +696,7 @@ private class DocWriter implements Runnable { @Override public void run() { - log.debug("Writing out a document of size " + value.get().length + " bytes."); + log.debug("Writing out a document of size {} bytes.", value.get().length); Mutation m = new Mutation(new Text(shardId)); m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(visibility), k.getTimestamp(), value); try { diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/CBMutationOutputFormatter.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/CBMutationOutputFormatter.java index 368c7cbfb67..4648ae17d49 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/CBMutationOutputFormatter.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/CBMutationOutputFormatter.java @@ -14,13 +14,14 @@ import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.RecordWriter; import org.apache.hadoop.mapreduce.TaskAttemptContext; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import datawave.ingest.data.config.ingest.AccumuloHelper; import datawave.ingest.mapreduce.handler.shard.ShardedDataTypeHandler; public class CBMutationOutputFormatter extends AccumuloOutputFormat { - private static final Logger log = Logger.getLogger(CBMutationOutputFormatter.class); + private static final Logger log = LoggerFactory.getLogger(CBMutationOutputFormatter.class); @Override public RecordWriter getRecordWriter(TaskAttemptContext attempt) throws IOException { @@ -51,7 +52,7 @@ public static class CBRecordWriter extends RecordWriter { public CBRecordWriter(RecordWriter writer, TaskAttemptContext context) throws IOException { this.delegate = writer; eventTable = context.getConfiguration().get(ShardedDataTypeHandler.SHARD_TNAME, ""); - log.info("Event Table Name property for " + ShardedDataTypeHandler.SHARD_TNAME + " is " + 
eventTable); + log.info("Event Table Name property for {} is {}", ShardedDataTypeHandler.SHARD_TNAME, eventTable); } @Override diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/ConstraintChecker.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/ConstraintChecker.java index 7805bd98e74..6af7ccdd623 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/ConstraintChecker.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/ConstraintChecker.java @@ -4,7 +4,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; @@ -14,7 +15,7 @@ */ public class ConstraintChecker { - private static final Logger log = Logger.getLogger(ConstraintChecker.class); + private static final Logger log = LoggerFactory.getLogger(ConstraintChecker.class); public static final String INITIALIZERS = "visibility.constraint.initializers"; @@ -42,7 +43,7 @@ public static ConstraintChecker create(Configuration conf) { initializer.addConstraints(conf, constraints); } catch (Exception e) { - log.error("Could invoke ConstraintInitializer: " + initializerClass, e); + log.error("Could invoke ConstraintInitializer: {}", initializerClass, e); throw new RuntimeException("Could invoke ConstraintInitializer: " + initializerClass, e); } } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/DelegatingPartitioner.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/DelegatingPartitioner.java index 6c4f52af353..fc933a7ae9a 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/DelegatingPartitioner.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/DelegatingPartitioner.java @@ -11,7 +11,8 @@ import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Partitioner; import org.apache.hadoop.util.StringUtils; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This partitioner delegates the partitioning logic to other partitioners based on table name. * The table may have its own dedicated partitioner or may share @@ -21,7 +22,7 @@ * partitioners each limit their output to 10 partitioners. The first will go to 0-9 and the other to 10-19. 
See DelegatePartitioner's getNumPartitions */ public class DelegatingPartitioner extends Partitioner implements Configurable { - protected static final Logger log = Logger.getLogger(DelegatingPartitioner.class); + protected static final Logger log = LoggerFactory.getLogger(DelegatingPartitioner.class); // this gets populated with the table names that have non-default partitioners defined static final String TABLE_NAMES_WITH_CUSTOM_PARTITIONERS = "DelegatingPartitioner.custom.delegate._tablenames"; @@ -101,7 +102,7 @@ public void setConf(Configuration conf) { try { createDelegatesForTables(); } catch (ClassNotFoundException e) { - log.error(e); + log.error("ClassNotFoundException:", e); // the validation step during the job set up identifies missing classes, so fail the mapper throw new RuntimeException(e); } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/SplitsFile.java b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/SplitsFile.java index 28283f940b9..7c25b062454 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/SplitsFile.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/mapreduce/job/SplitsFile.java @@ -17,7 +17,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Job; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Table; @@ -26,7 +27,7 @@ import datawave.util.time.DateHelper; public class SplitsFile { - private static final Logger log = Logger.getLogger(SplitsFile.class); + private static final Logger log = LoggerFactory.getLogger(SplitsFile.class); public static final String SPLIT_WORK_DIR = "split.work.dir"; public static final String MAX_SHARDS_PER_TSERVER = "shardedMap.max.shards.per.tserver"; @@ -45,11 +46,11 @@ public static void setupFile(Job job, Configuration conf) throws IOException, UR boolean doValidation = conf.getBoolean(SHARD_VALIDATION_ENABLED, false); try { - log.info("Base splits: " + baseSplitsPath); + log.info("Base splits: {}", baseSplitsPath); Path destSplits = new Path( conf.get(SPLIT_WORK_DIR) + "/" + conf.get(TableSplitsCache.SPLITS_CACHE_FILE, TableSplitsCache.DEFAULT_SPLITS_CACHE_FILE)); - log.info("Dest splits: " + destSplits); + log.info("Dest splits: {}", destSplits); FileUtil.copy(sourceFs, baseSplitsPath, destFs, destSplits, false, conf); conf.set(TableSplitsCache.SPLITS_CACHE_DIR, conf.get(SPLIT_WORK_DIR)); @@ -66,7 +67,7 @@ public static void setupFile(Job job, Configuration conf) throws IOException, UR } } catch (Exception e) { - log.error("Unable to use splits file because " + e.getMessage()); + log.error("Unable to use splits file because {}", e.getMessage()); throw e; } } @@ -93,13 +94,13 @@ public static void validateShardIdLocations(Configuration conf, String tableName int expectedNumberOfShards = shardIdFactory.getNumShards(datePrefix); boolean shardsExist = shardsExistForDate(shardIdToLocation, datePrefix, expectedNumberOfShards); if (!shardsExist) { - log.error("Shards for " + datePrefix + " for table " + tableName + " do not exist!"); + log.error("Shards for {} for table {} do not exist!", datePrefix, tableName); isValid = false; continue; } boolean shardsAreBalanced = shardsAreBalanced(shardIdToLocation, datePrefix, maxShardsPerTserver); if (!shardsAreBalanced) { - log.error("Shards for " + datePrefix + " for table " + tableName + " are not balanced!"); + log.error("Shards for {} for table {} are not balanced!", 
datePrefix, tableName); isValid = false; } } @@ -175,7 +176,7 @@ private static boolean shardsAreBalanced(Map locations, String date // if shard is assigned to more tservers than allowed, then the shards are not balanced if (cnt.intValue() > maxShardsPerTserver) { - log.warn(cnt.toInteger() + " Shards for " + datePrefix + " assigned to tablet " + value); + log.warn("{} Shards for {} assigned to tablet {}", cnt.toInteger(), datePrefix, value); dateIsBalanced = false; } diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/util/AbstractNGramTokenizationStrategy.java b/warehouse/ingest-core/src/main/java/datawave/ingest/util/AbstractNGramTokenizationStrategy.java index 093b0997a3b..7661c21d366 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/util/AbstractNGramTokenizationStrategy.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/util/AbstractNGramTokenizationStrategy.java @@ -1,7 +1,8 @@ package datawave.ingest.util; -import org.apache.log4j.Logger; import org.apache.lucene.analysis.ngram.NGramTokenizer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.hash.BloomFilter; @@ -20,7 +21,7 @@ public abstract class AbstractNGramTokenizationStrategy { protected static final int DEFAULT_MAX_NGRAM_LENGTH = 25; private BloomFilter filter; - private final Logger log = Logger.getLogger(AbstractNGramTokenizationStrategy.class); + private final Logger log = LoggerFactory.getLogger(AbstractNGramTokenizationStrategy.class); private AbstractNGramTokenizationStrategy source; /** diff --git a/warehouse/ingest-core/src/main/java/datawave/ingest/util/BloomFilterUtil.java b/warehouse/ingest-core/src/main/java/datawave/ingest/util/BloomFilterUtil.java index 1db52c0c2df..93c27310916 100644 --- a/warehouse/ingest-core/src/main/java/datawave/ingest/util/BloomFilterUtil.java +++ b/warehouse/ingest-core/src/main/java/datawave/ingest/util/BloomFilterUtil.java @@ -7,8 +7,9 @@ import java.util.Map; import java.util.Map.Entry; -import org.apache.log4j.Logger; import org.apache.lucene.analysis.ngram.NGramTokenizer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.Multimap; import com.google.common.hash.BloomFilter; @@ -31,7 +32,7 @@ public class BloomFilterUtil { private static final float FILTER_SIZE_TO_NGRAM_COUNT_FACTOR = 1.1f; private final AbstractContentIngestHelper helper; - private final Logger log = Logger.getLogger(BloomFilterUtil.class); + private final Logger log = LoggerFactory.getLogger(BloomFilterUtil.class); private final int maxAllowedExecutionTime; private int maxNGramLength = AbstractNGramTokenizationStrategy.DEFAULT_MAX_NGRAM_LENGTH; private final String minDiskSpacePath; @@ -80,7 +81,7 @@ protected BloomFilterUtil(final AbstractContentIngestHelper helper, float minMem * the n-gram tokenization strategy * @return The number of generated n-grams * @throws TimeoutException - * if the tokenization operation takes too long in relation to the overall mapred.task.timeout + * if the tokenization operation takes too long in relation to the overall mapreduce.task.timeout */ private int applyNgrams(final String fieldName, final Collection ncis, final AbstractNGramTokenizationStrategy strategy) throws TokenizationException { @@ -129,7 +130,7 @@ public int getMaxNGramLength() { } /** - * Returns the desired filter size to output from the applyNGrams(..) method. 
This value is meant as an approximation to help limit and optimize the number + * Returns the desired filter size to output from the applyNGrams(.) method. This value is meant as an approximation to help limit and optimize the number * of n-grams applied to a generated filter. A value less than or equal to the EMPTY_FILTER_SIZE effectively turns off pruning optimizations based on filter * size, which could result in unexpectedly large bloom filters. * @@ -157,11 +158,11 @@ public BloomFilterWrapper newDefaultFilter(int expectedInsertions) { } /** - * Create a BloomFilter based on a multi-map of fields + * Create a BloomFilter based on a multimap of fields * * @param fields * The fields and their values with which to create a bloom filter - * @return a wrapped BloomFilter based on a multi-map of fields + * @return a wrapped BloomFilter based on a multimap of fields */ public BloomFilterWrapper newMultimapBasedFilter(final Multimap fields) { // Declare the return value @@ -309,7 +310,7 @@ public BloomFilterWrapper newNGramBasedFilter(final Multimap 0) || (active > 0) || (compl < workUnits)) && !executor.isTerminated()) { if (log != null && (time < (System.currentTimeMillis() - (1000L * 10L)))) { - log.info(type + " running, T: " + active + "/" + poolSize + ", Completed: " + compl + "/" + workUnits + ", " + ", Remaining: " + qSize + ", " - + (cur - start) + " ms elapsed"); + log.info("{} running, T: {}/{}, Completed: {}/{}, Remaining: {}, {} ms elapsed", type, active, poolSize, compl, workUnits, qSize, + (cur - start)); time = System.currentTimeMillis(); } cur = System.currentTimeMillis(); @@ -69,8 +70,8 @@ public static long waitForThreads(Logger log, ThreadPoolExecutor executor, Strin compl = executor.getCompletedTaskCount(); } if (log != null) { - log.info("Finished Waiting for " + type + " running, T: " + active + "/" + poolSize + ", Completed: " + compl + "/" + workUnits + ", " - + ", Remaining: " + qSize + ", " + (cur - start) + " ms elapsed"); + log.info("Finished Waiting for {} running, T: {}/{}, Completed: {}/{}, Remaining: {}, {} ms elapsed", type, active, poolSize, compl, workUnits, + qSize, (cur - start)); } long stop = System.currentTimeMillis(); diff --git a/warehouse/ingest-core/src/main/java/datawave/util/flag/FlagEntryMover.java b/warehouse/ingest-core/src/main/java/datawave/util/flag/FlagEntryMover.java index 401719110f1..87e1a64489a 100644 --- a/warehouse/ingest-core/src/main/java/datawave/util/flag/FlagEntryMover.java +++ b/warehouse/ingest-core/src/main/java/datawave/util/flag/FlagEntryMover.java @@ -7,7 +7,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.cache.Cache; @@ -18,7 +19,7 @@ */ public class FlagEntryMover extends SimpleMover { - private static final Logger log = Logger.getLogger(FlagEntryMover.class); + private static final Logger log = LoggerFactory.getLogger(FlagEntryMover.class); private static final int CHKSUM_MAX = 10 * 1024 * 1000; // 10M public FlagEntryMover(Cache directoryCache, FileSystem fs, InputFile entry) { @@ -80,12 +81,16 @@ private boolean resolveConflict(final Path src, final Path dest) throws IOExcept if (resolved) { // rename tracked locations - log.warn("duplicate ingest file name with different payload(" + src.toUri().toString() + ") - appending timestamp to destination file name"); + if (log.isWarnEnabled()) { + log.warn("duplicate ingest file name with different payload( {} ) - appending 
timestamp to destination file name", src.toUri().toString()); + } this.entry.renameTrackedLocations(); } else { - log.warn("discarding duplicate ingest file (" + src.toUri().toString() + ") duplicate (" + dest.toUri().toString() + ")"); + if (log.isWarnEnabled()) { + log.warn("discarding duplicate ingest file ( {} ) duplicate ( {} )", src.toUri().toString(), dest.toUri().toString()); + } if (!fs.delete(src, false)) { - log.error("unable to delete duplicate ingest file (" + src.toUri().toString() + ")"); + log.error("unable to delete duplicate ingest file ( {} )", src.toUri().toString()); } } diff --git a/warehouse/ingest-core/src/main/java/datawave/util/flag/FlagMetrics.java b/warehouse/ingest-core/src/main/java/datawave/util/flag/FlagMetrics.java index 3d6a10e366d..81ceef4cc41 100644 --- a/warehouse/ingest-core/src/main/java/datawave/util/flag/FlagMetrics.java +++ b/warehouse/ingest-core/src/main/java/datawave/util/flag/FlagMetrics.java @@ -11,7 +11,8 @@ import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.GzipCodec; import org.apache.hadoop.mapreduce.Counters; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import datawave.ingest.mapreduce.StandaloneStatusReporter; import datawave.ingest.mapreduce.StandaloneTaskAttemptContext; @@ -21,7 +22,7 @@ */ public class FlagMetrics { - private static final Logger log = Logger.getLogger(FlagMetrics.class); + private static final Logger log = LoggerFactory.getLogger(FlagMetrics.class); private static final CompressionCodec cc = new GzipCodec(); private static final SequenceFile.CompressionType ct = SequenceFile.CompressionType.BLOCK; @@ -72,14 +73,14 @@ protected void writeMetrics(final String metricsDirectory, final String baseName Path src = new Path(fileName + ".working"); if (!fs.exists(finishedMetricsFile.getParent())) { if (!fs.mkdirs(finishedMetricsFile.getParent())) { - log.warn("unable to create directory (" + finishedMetricsFile.getParent() + ") metrics write terminated"); + log.warn("unable to create directory ( {} ) metrics write terminated", finishedMetricsFile.getParent()); return; } } if (!fs.exists(src.getParent())) { if (!fs.mkdirs(src.getParent())) { - log.warn("unable to create directory (" + src.getParent() + ") metrics write terminated"); + log.warn("unable to create directory ( {} ) metrics write terminated", src.getParent()); return; } } @@ -99,7 +100,7 @@ protected void writeMetrics(final String metricsDirectory, final String baseName break; // delete src - it will be recreated by while statement if (fs.delete(src, false)) { - log.warn("unable to delete metrics file (" + src + ")"); + log.warn("unable to delete metrics file ( {} )", src); } } diff --git a/warehouse/ingest-core/src/main/java/datawave/util/flag/FlagSocket.java b/warehouse/ingest-core/src/main/java/datawave/util/flag/FlagSocket.java index 6bcd36ed317..e7a9577459e 100644 --- a/warehouse/ingest-core/src/main/java/datawave/util/flag/FlagSocket.java +++ b/warehouse/ingest-core/src/main/java/datawave/util/flag/FlagSocket.java @@ -12,14 +12,15 @@ import java.util.Observable; import java.util.Observer; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * */ public class FlagSocket extends Observable implements Runnable, Observer { - private static final Logger log = Logger.getLogger(FlagSocket.class); + private static final Logger log = LoggerFactory.getLogger(FlagSocket.class); private ServerSocket serverSocket; private volatile boolean 
running = true; @@ -31,13 +32,15 @@ public FlagSocket(int port) throws IOException { public void run() { // register ourselves to observe... addObserver(this); - log.info("Listening for shutdown commands on port " + serverSocket.getLocalPort()); + if (log.isInfoEnabled()) { + log.info("Listening for shutdown commands on port {}", serverSocket.getLocalPort()); + } while (running) { try { Socket s = serverSocket.accept(); SocketAddress remoteAddress = s.getRemoteSocketAddress(); try { - log.info(remoteAddress + " connected to the shutdown port"); + log.info("{} connected to the shutdown port", remoteAddress); s.setSoTimeout(30000); InputStream is = s.getInputStream(); BufferedReader rdr = new BufferedReader(new InputStreamReader(is)); @@ -47,14 +50,14 @@ public void run() { setChanged(); notifyObservers(line); } catch (SocketTimeoutException e) { - log.info("Timed out waiting for input from " + remoteAddress); + log.info("Timed out waiting for input from {}", remoteAddress); } } catch (SocketException e) { if (running) { - log.info("Socket Exception occurred: " + e.getMessage(), e); + log.info("Socket Exception occurred: {}", e.getMessage(), e); } } catch (IOException e) { - log.error("Error waiting for shutdown connection: " + e.getMessage(), e); + log.error("Error waiting for shutdown connection: {}", e.getMessage(), e); } } } diff --git a/warehouse/ingest-core/src/main/java/datawave/util/flag/SimpleMover.java b/warehouse/ingest-core/src/main/java/datawave/util/flag/SimpleMover.java index bb0205c64eb..0b9a23bed72 100644 --- a/warehouse/ingest-core/src/main/java/datawave/util/flag/SimpleMover.java +++ b/warehouse/ingest-core/src/main/java/datawave/util/flag/SimpleMover.java @@ -5,7 +5,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.log4j.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.cache.Cache; @@ -16,7 +17,7 @@ */ public class SimpleMover implements Callable { - private static final Logger log = Logger.getLogger(SimpleMover.class); + private static final Logger log = LoggerFactory.getLogger(SimpleMover.class); final InputFile entry; final TrackedDir target; @@ -38,7 +39,9 @@ public InputFile call() throws IOException { if (entry.getCurrentDir() == dst || (!fs.exists(dst) && fs.rename(entry.getCurrentDir(), dst))) { entry.updateCurrentDir(this.target); } else { - log.error("Unable to move file " + entry.getCurrentDir().toUri() + " to " + dst.toUri() + ", skipping"); + if (log.isErrorEnabled()) { + log.error("Unable to move file {} to {}, skipping", entry.getCurrentDir().toUri(), dst.toUri()); + } } return entry; @@ -50,7 +53,7 @@ Path checkParent(Path path) throws IOException { if (fs.mkdirs(parent)) { directoryCache.put(parent, parent); } else { - log.warn("unable to create directory (" + parent + ")"); + log.warn("unable to create directory ( {} )", parent); } } return path; diff --git a/warehouse/ingest-core/src/test/java/datawave/ingest/util/NGramTokenizationStrategyTest.java b/warehouse/ingest-core/src/test/java/datawave/ingest/util/NGramTokenizationStrategyTest.java index 07d8ed864f4..06c395242a6 100644 --- a/warehouse/ingest-core/src/test/java/datawave/ingest/util/NGramTokenizationStrategyTest.java +++ b/warehouse/ingest-core/src/test/java/datawave/ingest/util/NGramTokenizationStrategyTest.java @@ -10,13 +10,14 @@ import java.util.Map; import java.util.Vector; -import org.apache.log4j.Logger; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import 
org.powermock.api.easymock.PowerMock; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.hash.BloomFilter; @@ -28,7 +29,7 @@ import datawave.ingest.util.TimeoutStrategy.TimeoutException; @RunWith(PowerMockRunner.class) -@PrepareForTest({ResourceAvailabilityUtil.class, Logger.class, MemberShipTest.class, BloomFilter.class}) +@PrepareForTest({ResourceAvailabilityUtil.class, LoggerFactory.class, MemberShipTest.class, BloomFilter.class}) public class NGramTokenizationStrategyTest { BloomFilter filter; @@ -130,8 +131,8 @@ public void testTokenize_LowMemory() throws Exception { int expectedNGramCount = BloomFilterUtil.predictNGramCount(fieldValue, MemoryStarvationStrategy.DEFAULT_MAX_NGRAM_LENGTH); // Set expectations - PowerMock.mockStaticPartial(Logger.class, "getLogger"); - expect(Logger.getLogger(isA(Class.class))).andReturn(this.logger).anyTimes(); + PowerMock.mockStaticPartial(LoggerFactory.class, "getLogger"); + expect(LoggerFactory.getLogger(isA(Class.class))).andReturn(this.logger).anyTimes(); PowerMock.mockStaticPartial(ResourceAvailabilityUtil.class, "isMemoryAvailable"); expect(ResourceAvailabilityUtil.isMemoryAvailable(.05f)).andReturn(true); PowerMock.mockStaticPartial(ResourceAvailabilityUtil.class, "isMemoryAvailable"); @@ -160,8 +161,8 @@ public void testTokenize_LowDiskSpace() throws Exception { int expectedNGramCount = BloomFilterUtil.predictNGramCount(fieldValue, DiskSpaceStarvationStrategy.DEFAULT_MAX_NGRAM_LENGTH); // Set expectations - PowerMock.mockStaticPartial(Logger.class, "getLogger"); - expect(Logger.getLogger(isA(Class.class))).andReturn(this.logger).anyTimes(); + PowerMock.mockStaticPartial(LoggerFactory.class, "getLogger"); + expect(LoggerFactory.getLogger(isA(Class.class))).andReturn(this.logger).anyTimes(); PowerMock.mockStaticPartial(ResourceAvailabilityUtil.class, "isDiskAvailable"); expect(ResourceAvailabilityUtil.isDiskAvailable("/", .05f)).andReturn(true); PowerMock.mockStaticPartial(ResourceAvailabilityUtil.class, "isDiskAvailable"); @@ -509,8 +510,8 @@ public void testTokenize_StrategyStack() throws Exception { int timeoutAfterNGramCount = BloomFilterUtil.predictNGramCount(ncis.iterator().next().getIndexedFieldValue()); // Set expectations - PowerMock.mockStaticPartial(Logger.class, "getLogger"); - expect(Logger.getLogger(isA(Class.class))).andReturn(this.logger).anyTimes(); + PowerMock.mockStaticPartial(LoggerFactory.class, "getLogger"); + expect(LoggerFactory.getLogger(isA(Class.class))).andReturn(this.logger).anyTimes(); PowerMock.mockStaticPartial(ResourceAvailabilityUtil.class, "isDiskAvailable"); expect(ResourceAvailabilityUtil.isDiskAvailable("/", .05f)).andReturn(true).times(1); expect(ResourceAvailabilityUtil.isDiskAvailable("/", .05f)).andReturn(false).times(1);
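For reference, the conversion pattern these hunks apply is consistent: obtain loggers from LoggerFactory, swap string concatenation for {} placeholders, pass a Throwable as the final argument so SLF4J renders its stack trace, and, since SLF4J defines no FATAL level, carry that severity with a marker where needed. A condensed sketch of the resulting shape; the class name and values are illustrative, not from this patch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.Marker;
import org.slf4j.MarkerFactory;

// Illustrative summary of the logging pattern adopted throughout this patch.
public class Slf4jConversionExample {
    private static final Logger log = LoggerFactory.getLogger(Slf4jConversionExample.class);
    private static final Marker FATAL = MarkerFactory.getMarker("FATAL");

    public static void main(String[] args) {
        String tableName = "shard"; // illustrative value
        // Old log4j style: log.info("Creating table: " + tableName);
        // New SLF4J style: formatting is deferred until INFO is enabled.
        log.info("Creating table: {}", tableName);

        try {
            throw new IllegalStateException("example failure");
        } catch (Exception e) {
            // A trailing Throwable is logged with its stack trace; {} still binds tableName.
            log.error("Unable to clean up {}", tableName, e);
            // No FATAL level in SLF4J; a marker can preserve that severity for filtering.
            log.error(FATAL, "Unrecoverable error for {}", tableName, e);
        }
    }
}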