diff --git a/build.xml b/build.xml
index eb82ff75..2181586e 100644
--- a/build.xml
+++ b/build.xml
@@ -484,6 +484,14 @@
+
+
+
diff --git a/conf/.gitignore b/conf/.gitignore
new file mode 100644
index 00000000..c0b9de64
--- /dev/null
+++ b/conf/.gitignore
@@ -0,0 +1,15 @@
+# Licensed to Cloudera, Inc. under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# Cloudera, Inc. licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+/sqoop-site.xml
diff --git a/conf/sqoop-default.xml b/conf/sqoop-default.xml
deleted file mode 100644
index 5355acde..00000000
--- a/conf/sqoop-default.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-
-
-
-
-
-
-
-
-
- <property>
- <name>sqoop.connection.factories</name>
- <value>com.cloudera.sqoop.manager.DefaultManagerFactory</value>
- <description>A comma-delimited list of ManagerFactory implementations
- which are consulted, in order, to instantiate ConnManager instances
- used to drive connections to databases.
- </description>
- </property>
-
-
-
-
diff --git a/conf/sqoop-site-template.xml b/conf/sqoop-site-template.xml
new file mode 100644
index 00000000..0f06f84a
--- /dev/null
+++ b/conf/sqoop-site-template.xml
@@ -0,0 +1,124 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/java/com/cloudera/sqoop/Sqoop.java b/src/java/com/cloudera/sqoop/Sqoop.java
index 83521dbc..c33e609d 100644
--- a/src/java/com/cloudera/sqoop/Sqoop.java
+++ b/src/java/com/cloudera/sqoop/Sqoop.java
@@ -46,7 +46,6 @@ public class Sqoop extends Configured implements Tool {
public static final String SQOOP_RETHROW_PROPERTY = "sqoop.throwOnError";
static {
- Configuration.addDefaultResource("sqoop-default.xml");
Configuration.addDefaultResource("sqoop-site.xml");
}
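With sqoop-default.xml removed, sqoop-site.xml becomes the only default resource registered here, and any key a site file leaves unset falls back to defaults compiled into the code (for example METASTORE_PASSWORD_DEFAULT, added later in this patch). A minimal sketch of that lookup pattern, not part of the patch; the class name is illustrative only:

```java
import org.apache.hadoop.conf.Configuration;

public class SiteConfigSketch {
  public static void main(String[] args) {
    // Mirrors the static block in Sqoop.java: only sqoop-site.xml is
    // registered; anything it does not set falls back to code defaults.
    Configuration.addDefaultResource("sqoop-site.xml");

    Configuration conf = new Configuration();
    boolean recordPassword = conf.getBoolean(
        "sqoop.metastore.client.record.password", false);
    System.out.println("Record passwords in the metastore? " + recordPassword);
  }
}
```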
diff --git a/src/java/com/cloudera/sqoop/SqoopOptions.java b/src/java/com/cloudera/sqoop/SqoopOptions.java
index 961b007f..37e18337 100644
--- a/src/java/com/cloudera/sqoop/SqoopOptions.java
+++ b/src/java/com/cloudera/sqoop/SqoopOptions.java
@@ -20,9 +20,7 @@
package com.cloudera.sqoop;
import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Properties;
@@ -39,6 +37,15 @@ public class SqoopOptions {
public static final Log LOG = LogFactory.getLog(SqoopOptions.class.getName());
+ /**
+ * Set to true in configuration if you want to put db passwords
+ * in the metastore.
+ */
+ public static final String METASTORE_PASSWORD_KEY =
+ "sqoop.metastore.client.record.password";
+
+ public static final boolean METASTORE_PASSWORD_DEFAULT = false;
+
/**
* Thrown when invalid cmdline options are given.
*/
@@ -68,19 +75,20 @@ public enum FileLayout {
// TODO(aaron): Adding something here? Add a setter and a getter.
- // Add a default value in initDefaults() if you need one.
- // If you want to load from a properties file, add an entry in the
- // loadFromProperties() method.
- // Then add command-line arguments in the appropriate tools. The
- // names of all command-line args are stored as constants in BaseSqoopTool.
+ // Add a default value in initDefaults() if you need one. If this value
+ // needs to be serialized in the metastore for this session, you need to add
+ // an appropriate line to loadProperties() and writeProperties(). Then add
+ // command-line arguments in the appropriate tools. The names of all
+ // command-line args are stored as constants in BaseSqoopTool.
+
private String connectString;
private String tableName;
private String [] columns;
private String username;
- private String password;
+ private String password; // May not be serialized, based on configuration.
private String codeOutputDir;
private String jarOutputDir;
- private String hadoopHome;
+ private String hadoopHome; // not serialized to metastore.
private String splitByCol;
private String whereClause;
private String sqlQuery;
@@ -90,8 +98,8 @@ public enum FileLayout {
private boolean append;
private FileLayout layout;
private boolean direct; // if true and conn is mysql, use mysqldump.
- private String tmpDir; // where temp data goes; usually /tmp
- private String hiveHome;
+ private String tmpDir; // where temp data goes; usually /tmp; not serialized.
+ private String hiveHome; // not serialized to metastore.
private boolean hiveImport;
private boolean overwriteHiveTable;
private String hiveTableName;
@@ -99,6 +107,7 @@ public enum FileLayout {
// An ordered list of column names denoting what order columns are
// serialized to a PreparedStatement from a generated record type.
+ // Not serialized to metastore.
private String [] dbOutColumns;
// package+class to apply to individual table import.
@@ -133,8 +142,6 @@ public enum FileLayout {
public static final int DEFAULT_NUM_MAPPERS = 4;
- private static final String DEFAULT_CONFIG_FILE = "sqoop.properties";
-
private String [] extraArgs;
private String hbaseTable; // HBase table to import into.
@@ -182,62 +189,300 @@ private long getLongProperty(Properties props, String propName,
}
}
- private void loadFromProperties() {
- File configFile = new File(DEFAULT_CONFIG_FILE);
- if (!configFile.canRead()) {
- return; //can't do this.
+ private int getIntProperty(Properties props, String propName,
+ int defaultVal) {
+ long longVal = getLongProperty(props, propName, defaultVal);
+ return (int) longVal;
+ }
+
+ private char getCharProperty(Properties props, String propName,
+ char defaultVal) {
+ int intVal = getIntProperty(props, propName, (int) defaultVal);
+ return (char) intVal;
+ }
+
+ private DelimiterSet getDelimiterProperties(Properties props,
+ String prefix, DelimiterSet defaults) {
+
+ if (null == defaults) {
+ defaults = new DelimiterSet();
}
- Properties props = new Properties();
- InputStream istream = null;
- try {
- LOG.info("Loading properties from " + configFile.getAbsolutePath());
- istream = new FileInputStream(configFile);
- props.load(istream);
+ char field = getCharProperty(props, prefix + ".field",
+ defaults.getFieldsTerminatedBy());
+ char record = getCharProperty(props, prefix + ".record",
+ defaults.getLinesTerminatedBy());
+ char enclose = getCharProperty(props, prefix + ".enclose",
+ defaults.getEnclosedBy());
+ char escape = getCharProperty(props, prefix + ".escape",
+ defaults.getEscapedBy());
+ boolean required = getBooleanProperty(props, prefix + ".enclose.required",
+ defaults.isEncloseRequired());
- this.hadoopHome = props.getProperty("hadoop.home", this.hadoopHome);
- this.codeOutputDir = props.getProperty("out.dir", this.codeOutputDir);
- this.jarOutputDir = props.getProperty("bin.dir", this.jarOutputDir);
- this.username = props.getProperty("db.username", this.username);
- this.password = props.getProperty("db.password", this.password);
- this.tableName = props.getProperty("db.table", this.tableName);
- this.connectString = props.getProperty("db.connect.url",
- this.connectString);
- this.splitByCol = props.getProperty("db.split.column", this.splitByCol);
- this.whereClause = props.getProperty("db.where.clause", this.whereClause);
- this.driverClassName = props.getProperty("jdbc.driver",
- this.driverClassName);
- this.warehouseDir = props.getProperty("hdfs.warehouse.dir",
- this.warehouseDir);
- this.hiveHome = props.getProperty("hive.home", this.hiveHome);
- this.className = props.getProperty("java.classname", this.className);
- this.packageName = props.getProperty("java.packagename",
- this.packageName);
- this.existingJarFile = props.getProperty("java.jar.file",
- this.existingJarFile);
- this.exportDir = props.getProperty("export.dir", this.exportDir);
+ return new DelimiterSet(field, record, enclose, escape, required);
+ }
- this.direct = getBooleanProperty(props, "direct.import", this.direct);
- this.hiveImport = getBooleanProperty(props, "hive.import",
- this.hiveImport);
- this.overwriteHiveTable = getBooleanProperty(props,
- "hive.overwrite.table", this.overwriteHiveTable);
- this.useCompression = getBooleanProperty(props, "compression",
- this.useCompression);
- this.directSplitSize = getLongProperty(props, "direct.split.size",
- this.directSplitSize);
- } catch (IOException ioe) {
- LOG.error("Could not read properties file " + DEFAULT_CONFIG_FILE + ": "
- + ioe.toString());
- } finally {
- if (null != istream) {
- try {
- istream.close();
- } catch (IOException ioe) {
- // Ignore this; we're closing.
- }
+ private void setDelimiterProperties(Properties props,
+ String prefix, DelimiterSet values) {
+ putProperty(props, prefix + ".field",
+ Integer.toString((int) values.getFieldsTerminatedBy()));
+ putProperty(props, prefix + ".record",
+ Integer.toString((int) values.getLinesTerminatedBy()));
+ putProperty(props, prefix + ".enclose",
+ Integer.toString((int) values.getEnclosedBy()));
+ putProperty(props, prefix + ".escape",
+ Integer.toString((int) values.getEscapedBy()));
+ putProperty(props, prefix + ".enclose.required",
+ Boolean.toString(values.isEncloseRequired()));
+ }
+
+ /** Split a comma-delimited input string into an array of its
+ * elements. */
+ private String [] listToArray(String strList) {
+ return strList.split(",");
+ }
+
+ private String arrayToList(String [] array) {
+ if (null == array) {
+ return null;
+ }
+
+ StringBuilder sb = new StringBuilder();
+ boolean first = true;
+ for (String elem : array) {
+ if (!first) {
+ sb.append(",");
}
+ sb.append(elem);
+ first = false;
}
+
+ return sb.toString();
+ }
+
+ /**
+ * A put() method for Properties that is tolerant of 'null' values.
+ * If a null value is specified, the property is unset.
+ */
+ private void putProperty(Properties props, String k, String v) {
+ if (null == v) {
+ props.remove(k);
+ } else {
+ props.setProperty(k, v);
+ }
+ }
+
+ /**
+ * Given a property prefix that denotes a set of numbered properties,
+ * return an array containing all the properties.
+ *
+ * For instance, if prefix is "foo", then return properties "foo.0",
+ * "foo.1", "foo.2", and so on as an array. If no such properties
+ * exist, return 'defaults'.
+ */
+ private String [] getArgArrayProperty(Properties props, String prefix,
+ String [] defaults) {
+ int cur = 0;
+ ArrayList<String> al = new ArrayList<String>();
+ while (true) {
+ String curProp = prefix + "." + cur;
+ String curStr = props.getProperty(curProp, null);
+ if (null == curStr) {
+ break;
+ }
+
+ al.add(curStr);
+ cur++;
+ }
+
+ if (cur == 0) {
+ // Couldn't find an array here; return the defaults.
+ return defaults;
+ }
+
+ return al.toArray(new String[0]);
+ }
+
+ private void setArgArrayProperties(Properties props, String prefix,
+ String [] values) {
+ if (null == values) {
+ return;
+ }
+
+ for (int i = 0; i < values.length; i++) {
+ putProperty(props, prefix + "." + i, values[i]);
+ }
+ }
+
+ /**
+ * Given a set of properties, load this into the current SqoopOptions
+ * instance.
+ */
+ public void loadProperties(Properties props) {
+
+ this.connectString = props.getProperty("db.connect.string",
+ this.connectString);
+ this.username = props.getProperty("db.username", this.username);
+
+ if (getBooleanProperty(props, "db.require.password", false)) {
+ // The user's password was stripped out from the metastore.
+ // Require that the user enter it now.
+ setPasswordFromConsole();
+ } else {
+ this.password = props.getProperty("db.password", this.password);
+ }
+
+ this.tableName = props.getProperty("db.table", this.tableName);
+ String colListStr = props.getProperty("db.column.list", null);
+ if (null != colListStr) {
+ this.columns = listToArray(colListStr);
+ }
+
+ this.codeOutputDir = props.getProperty("codegen.output.dir",
+ this.codeOutputDir);
+ this.jarOutputDir = props.getProperty("codegen.compile.dir",
+ this.jarOutputDir);
+
+ this.splitByCol = props.getProperty("db.split.column", this.splitByCol);
+ this.whereClause = props.getProperty("db.where.clause", this.whereClause);
+ this.sqlQuery = props.getProperty("db.query", this.sqlQuery);
+
+ this.driverClassName = props.getProperty("jdbc.driver.class",
+ this.driverClassName);
+
+ this.warehouseDir = props.getProperty("hdfs.warehouse.dir",
+ this.warehouseDir);
+ this.targetDir = props.getProperty("hdfs.target.dir",
+ this.targetDir);
+ this.append = getBooleanProperty(props, "hdfs.append.dir", this.append);
+
+ String fileFmtStr = props.getProperty("hdfs.file.format", "text");
+ if (fileFmtStr.equals("seq")) {
+ this.layout = FileLayout.SequenceFile;
+ } else {
+ this.layout = FileLayout.TextFile;
+ }
+
+ this.direct = getBooleanProperty(props, "direct.import", this.direct);
+
+ this.hiveImport = getBooleanProperty(props, "hive.import",
+ this.hiveImport);
+ this.overwriteHiveTable = getBooleanProperty(props,
+ "hive.overwrite.table", this.overwriteHiveTable);
+ this.hiveTableName = props.getProperty("hive.table.name",
+ this.hiveTableName);
+
+ this.className = props.getProperty("codegen.java.classname",
+ this.className);
+ this.packageName = props.getProperty("codegen.java.packagename",
+ this.packageName);
+ this.existingJarFile = props.getProperty("codegen.jar.file",
+ this.existingJarFile);
+
+ this.numMappers = getIntProperty(props, "mapreduce.num.mappers",
+ this.numMappers);
+
+ this.useCompression = getBooleanProperty(props, "enable.compression",
+ this.useCompression);
+
+ this.directSplitSize = getLongProperty(props, "import.direct.split.size",
+ this.directSplitSize);
+
+ this.maxInlineLobSize = getLongProperty(props,
+ "import.max.inline.lob.size", this.maxInlineLobSize);
+
+ this.exportDir = props.getProperty("export.source.dir", this.exportDir);
+ this.updateKeyCol = props.getProperty("export.update.col",
+ this.updateKeyCol);
+
+ this.inputDelimiters = getDelimiterProperties(props,
+ "codegen.input.delimiters", this.inputDelimiters);
+ this.outputDelimiters = getDelimiterProperties(props,
+ "codegen.output.delimiters", this.outputDelimiters);
+
+ this.extraArgs = getArgArrayProperty(props, "tool.arguments",
+ this.extraArgs);
+
+ this.hbaseTable = props.getProperty("hbase.table", this.hbaseTable);
+ this.hbaseColFamily = props.getProperty("hbase.col.family",
+ this.hbaseColFamily);
+ this.hbaseRowKeyCol = props.getProperty("hbase.row.key.col",
+ this.hbaseRowKeyCol);
+ this.hbaseCreateTable = getBooleanProperty(props, "hbase.create.table",
+ this.hbaseCreateTable);
+ }
+
+ /**
+ * Return a Properties instance that encapsulates all the "sticky"
+ * state of this SqoopOptions that should be written to a metastore
+ * to restore the session later.
+ */
+ public Properties writeProperties() {
+ Properties props = new Properties();
+
+ putProperty(props, "db.connect.string", this.connectString);
+ putProperty(props, "db.username", this.username);
+
+ if (this.getConf().getBoolean(
+ METASTORE_PASSWORD_KEY, METASTORE_PASSWORD_DEFAULT)) {
+ // If the user specifies, we may store the password in the metastore.
+ putProperty(props, "db.password", this.password);
+ putProperty(props, "db.require.password", "false");
+ } else if (this.password != null) {
+ // Otherwise, if the user has set a password, we just record
+ // a flag stating that the password will need to be reentered.
+ putProperty(props, "db.require.password", "true");
+ } else {
+ // No password saved or required.
+ putProperty(props, "db.require.password", "false");
+ }
+
+ putProperty(props, "db.table", this.tableName);
+ putProperty(props, "db.column.list", arrayToList(this.columns));
+ putProperty(props, "codegen.output.dir", this.codeOutputDir);
+ putProperty(props, "codegen.compile.dir", this.jarOutputDir);
+ putProperty(props, "db.split.column", this.splitByCol);
+ putProperty(props, "db.where.clause", this.whereClause);
+ putProperty(props, "db.query", this.sqlQuery);
+ putProperty(props, "jdbc.driver.class", this.driverClassName);
+ putProperty(props, "hdfs.warehouse.dir", this.warehouseDir);
+ putProperty(props, "hdfs.target.dir", this.targetDir);
+ putProperty(props, "hdfs.append.dir", Boolean.toString(this.append));
+ if (this.layout == FileLayout.SequenceFile) {
+ putProperty(props, "hdfs.file.format", "seq");
+ } else {
+ putProperty(props, "hdfs.file.format", "text");
+ }
+ putProperty(props, "direct.import", Boolean.toString(this.direct));
+ putProperty(props, "hive.import", Boolean.toString(this.hiveImport));
+ putProperty(props, "hive.overwrite.table",
+ Boolean.toString(this.overwriteHiveTable));
+ putProperty(props, "hive.table.name", this.hiveTableName);
+ putProperty(props, "codegen.java.classname", this.className);
+ putProperty(props, "codegen.java.packagename", this.packageName);
+ putProperty(props, "codegen.jar.file", this.existingJarFile);
+ putProperty(props, "mapreduce.num.mappers",
+ Integer.toString(this.numMappers));
+ putProperty(props, "enable.compression",
+ Boolean.toString(this.useCompression));
+ putProperty(props, "import.direct.split.size",
+ Long.toString(this.directSplitSize));
+ putProperty(props, "import.max.inline.lob.size",
+ Long.toString(this.maxInlineLobSize));
+ putProperty(props, "export.source.dir", this.exportDir);
+ putProperty(props, "export.update.col", this.updateKeyCol);
+ setDelimiterProperties(props, "codegen.input.delimiters",
+ this.inputDelimiters);
+ setDelimiterProperties(props, "codegen.output.delimiters",
+ this.outputDelimiters);
+ setArgArrayProperties(props, "tool.arguments", this.extraArgs);
+ putProperty(props, "hbase.table", this.hbaseTable);
+ putProperty(props, "hbase.col.family", this.hbaseColFamily);
+ putProperty(props, "hbase.row.key.col", this.hbaseRowKeyCol);
+ putProperty(props, "hbase.create.table",
+ Boolean.toString(this.hbaseCreateTable));
+
+ return props;
}
/**
@@ -291,8 +536,6 @@ private void initDefaults(Configuration baseConfiguration) {
this.extraArgs = null;
this.dbOutColumns = null;
-
- loadFromProperties();
}
/**
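The writeProperties()/loadProperties() pair above replaces the old sqoop.properties loader: writeProperties() captures the "sticky" option state (delimiters as integer codes, the password only when sqoop.metastore.client.record.password permits) and loadProperties() restores it. A minimal round-trip sketch, not part of the patch; the connect string and class name are placeholders:

```java
import java.util.Properties;

import org.apache.hadoop.conf.Configuration;

import com.cloudera.sqoop.SqoopOptions;

public class OptionsRoundTripSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Opt in to recording the database password in the metastore.
    conf.setBoolean(SqoopOptions.METASTORE_PASSWORD_KEY, true);

    SqoopOptions original = new SqoopOptions(conf);
    original.setConnectString("jdbc:hsqldb:mem:example"); // placeholder
    original.setUsername("SA");
    original.setPassword("");

    // Serialize the sticky state, as a SessionStorage implementation would.
    Properties props = original.writeProperties();

    // Later, rebuild an equivalent SqoopOptions from the stored bundle.
    SqoopOptions restored = new SqoopOptions(conf);
    restored.loadProperties(props);
  }
}
```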
diff --git a/src/java/com/cloudera/sqoop/metastore/SessionData.java b/src/java/com/cloudera/sqoop/metastore/SessionData.java
new file mode 100644
index 00000000..52b4ce7f
--- /dev/null
+++ b/src/java/com/cloudera/sqoop/metastore/SessionData.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to Cloudera, Inc. under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Cloudera, Inc. licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.cloudera.sqoop.metastore;
+
+import com.cloudera.sqoop.SqoopOptions;
+import com.cloudera.sqoop.tool.SqoopTool;
+
+/**
+ * Container for all session data that should be stored to a
+ * permanent resource.
+ */
+public class SessionData {
+ private SqoopOptions opts;
+ private SqoopTool tool;
+
+ public SessionData() {
+ }
+
+ public SessionData(SqoopOptions options, SqoopTool sqoopTool) {
+ this.opts = options;
+ this.tool = sqoopTool;
+ }
+
+ /**
+ * Gets the SqoopOptions.
+ */
+ public SqoopOptions getSqoopOptions() {
+ return this.opts;
+ }
+
+ /**
+ * Gets the SqoopTool.
+ */
+ public SqoopTool getSqoopTool() {
+ return this.tool;
+ }
+
+ /**
+ * Sets the SqoopOptions.
+ */
+ public void setSqoopOptions(SqoopOptions options) {
+ this.opts = options;
+ }
+
+ /**
+ * Sets the SqoopTool.
+ */
+ public void setSqoopTool(SqoopTool sqoopTool) {
+ this.tool = sqoopTool;
+ }
+
+}
+
diff --git a/src/java/com/cloudera/sqoop/metastore/SessionStorage.java b/src/java/com/cloudera/sqoop/metastore/SessionStorage.java
new file mode 100644
index 00000000..3291acda
--- /dev/null
+++ b/src/java/com/cloudera/sqoop/metastore/SessionStorage.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to Cloudera, Inc. under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Cloudera, Inc. licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.cloudera.sqoop.metastore;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configured;
+
+/**
+ * API that defines how sessions are saved, restored, and manipulated.
+ *
+ * SessionStorage instances may be created and then not used; the
+ * SessionStorage factory may create additional SessionStorage instances
+ * that return false from canAccept() and then discard them. The close()
+ * method will only be triggered for a SessionStorage if the open()
+ * method is called. Connection should not be triggered by a call to
+ * canAccept().
+ */
+public abstract class SessionStorage extends Configured implements Closeable {
+
+ /**
+ * Returns true if the SessionStorage system can use the metadata in
+ * the descriptor to connect to an underlying session resource.
+ */
+ public abstract boolean canAccept(Map<String, String> descriptor);
+
+
+ /**
+ * Opens / connects to the underlying storage resource specified by the
+ * descriptor.
+ */
+ public abstract void open(Map<String, String> descriptor)
+ throws IOException;
+
+ /**
+ * Given a session name, reconstitute a SessionData that contains all
+ * configuration information required for the session. Returns null if the
+ * session name does not match an available session.
+ */
+ public abstract SessionData read(String sessionName)
+ throws IOException;
+
+ /**
+ * Forget about a saved session.
+ */
+ public abstract void delete(String sessionName) throws IOException;
+
+ /**
+ * Given a session name and the data describing a configured
+ * session, record the session information to the storage medium.
+ */
+ public abstract void create(String sessionName, SessionData data)
+ throws IOException;
+
+ /**
+ * Given a session name and the data describing a configured session,
+ * update the underlying resource to match the current session
+ * configuration.
+ */
+ public abstract void update(String sessionName, SessionData data)
+ throws IOException;
+
+ /**
+ * Close any resources opened by the SessionStorage system.
+ */
+ public void close() throws IOException {
+ }
+
+ /**
+ * Enumerate all sessions held in the connected resource.
+ */
+ public abstract List<String> list() throws IOException;
+}
+
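To make the lifecycle described in the class javadoc concrete, here is a minimal in-memory SessionStorage sketch, not part of the patch: canAccept() only inspects the descriptor, open() is where a real implementation would connect, and close() cleanup is inherited. The "inmemory.storage" descriptor key is invented purely for illustration.

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.cloudera.sqoop.metastore.SessionData;
import com.cloudera.sqoop.metastore.SessionStorage;

public class InMemorySessionStorage extends SessionStorage {
  private final Map<String, SessionData> sessions =
      new HashMap<String, SessionData>();

  @Override
  public boolean canAccept(Map<String, String> descriptor) {
    // Only claim descriptors explicitly marked for this storage.
    return descriptor.containsKey("inmemory.storage");
  }

  @Override
  public void open(Map<String, String> descriptor) throws IOException {
    // Nothing to connect to; all state lives in this process.
  }

  @Override
  public SessionData read(String sessionName) throws IOException {
    // Returns null when the session name is unknown, per the contract.
    return sessions.get(sessionName);
  }

  @Override
  public void delete(String sessionName) throws IOException {
    sessions.remove(sessionName);
  }

  @Override
  public void create(String sessionName, SessionData data)
      throws IOException {
    sessions.put(sessionName, data);
  }

  @Override
  public void update(String sessionName, SessionData data)
      throws IOException {
    sessions.put(sessionName, data);
  }

  @Override
  public List<String> list() throws IOException {
    return new ArrayList<String>(sessions.keySet());
  }
}
```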
diff --git a/src/java/com/cloudera/sqoop/metastore/SessionStorageFactory.java b/src/java/com/cloudera/sqoop/metastore/SessionStorageFactory.java
new file mode 100644
index 00000000..3ae75bb8
--- /dev/null
+++ b/src/java/com/cloudera/sqoop/metastore/SessionStorageFactory.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to Cloudera, Inc. under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Cloudera, Inc. licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.cloudera.sqoop.metastore;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Factory that produces the correct SessionStorage system to work with
+ * a particular session descriptor.
+ */
+public class SessionStorageFactory {
+
+ private Configuration conf;
+
+ /**
+ * Configuration key describing the list of SessionStorage implementations
+ * to use to handle sessions.
+ */
+ public static final String AVAILABLE_STORAGES_KEY =
+ "sqoop.session.storage.implementations";
+
+ /** The default list of available SessionStorage implementations. */
+ private static final String DEFAULT_AVAILABLE_STORAGES =
+ "com.cloudera.sqoop.metastore.hsqldb.HsqldbSessionStorage,"
+ + "com.cloudera.sqoop.metastore.hsqldb.AutoHsqldbStorage";
+
+ public SessionStorageFactory(Configuration config) {
+ this.conf = config;
+
+ // Ensure that we always have an available storages list.
+ if (this.conf.get(AVAILABLE_STORAGES_KEY) == null) {
+ this.conf.set(AVAILABLE_STORAGES_KEY, DEFAULT_AVAILABLE_STORAGES);
+ }
+ }
+
+ /**
+ * Given a session descriptor, determine the correct SessionStorage
+ * implementation to use to handle the session and return an instance
+ * of it -- or null if no SessionStorage instance is appropriate.
+ */
+ public SessionStorage getSessionStorage(Map<String, String> descriptor) {
+ List<SessionStorage> storages = this.conf.getInstances(
+ AVAILABLE_STORAGES_KEY, SessionStorage.class);
+ for (SessionStorage stor : storages) {
+ if (stor.canAccept(descriptor)) {
+ return stor;
+ }
+ }
+
+ return null;
+ }
+}
+
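A minimal sketch, not part of the patch, of how a caller might resolve and use a storage through this factory; the connect string is a placeholder and error handling is elided:

```java
import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;

import com.cloudera.sqoop.metastore.SessionStorage;
import com.cloudera.sqoop.metastore.SessionStorageFactory;
import com.cloudera.sqoop.metastore.hsqldb.HsqldbSessionStorage;

public class StorageLookupSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    SessionStorageFactory factory = new SessionStorageFactory(conf);

    // A descriptor carrying a metastore connect string selects
    // HsqldbSessionStorage; without one, AutoHsqldbStorage accepts the
    // descriptor instead (when autoconnect is enabled).
    Map<String, String> descriptor = new TreeMap<String, String>();
    descriptor.put(HsqldbSessionStorage.META_CONNECT_KEY,
        "jdbc:hsqldb:hsql://localhost:16000/sqoop"); // placeholder

    SessionStorage storage = factory.getSessionStorage(descriptor);
    if (storage != null) {
      storage.open(descriptor);
      try {
        for (String sessionName : storage.list()) {
          System.out.println(sessionName);
        }
      } finally {
        storage.close();
      }
    }
  }
}
```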
diff --git a/src/java/com/cloudera/sqoop/metastore/hsqldb/AutoHsqldbStorage.java b/src/java/com/cloudera/sqoop/metastore/hsqldb/AutoHsqldbStorage.java
new file mode 100644
index 00000000..ffcde79e
--- /dev/null
+++ b/src/java/com/cloudera/sqoop/metastore/hsqldb/AutoHsqldbStorage.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to Cloudera, Inc. under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Cloudera, Inc. licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.cloudera.sqoop.metastore.hsqldb;
+
+import java.io.File;
+import java.io.IOException;
+
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * SessionStorage implementation that auto-configures an HSQLDB
+ * local-file-based instance to hold sessions.
+ */
+public class AutoHsqldbStorage extends HsqldbSessionStorage {
+
+ public static final Log LOG = LogFactory.getLog(
+ AutoHsqldbStorage.class.getName());
+
+ /**
+ * Configuration key specifying whether this storage agent is active.
+ * Defaults to "on" to allow zero-conf local users.
+ */
+ public static final String AUTO_STORAGE_IS_ACTIVE_KEY =
+ "sqoop.metastore.client.enable.autoconnect";
+
+ /**
+ * Configuration key specifying the connect string used by this
+ * storage agent.
+ */
+ public static final String AUTO_STORAGE_CONNECT_STRING_KEY =
+ "sqoop.metastore.client.autoconnect.url";
+
+ /**
+ * Configuration key specifying the username to bind with.
+ */
+ public static final String AUTO_STORAGE_USER_KEY =
+ "sqoop.metastore.client.autoconnect.username";
+
+
+ /** HSQLDB default user is named 'SA'. */
+ private static final String DEFAULT_AUTO_USER = "SA";
+
+ /**
+ * Configuration key specifying the password to bind with.
+ */
+ public static final String AUTO_STORAGE_PASS_KEY =
+ "sqoop.metastore.client.autoconnect.password";
+
+ /** HSQLDB default user has an empty password. */
+ public static final String DEFAULT_AUTO_PASSWORD = "";
+
+ @Override
+ /** {@inheritDoc} */
+ public boolean canAccept(Map<String, String> descriptor) {
+ Configuration conf = this.getConf();
+ return conf.getBoolean(AUTO_STORAGE_IS_ACTIVE_KEY, true);
+ }
+
+ /**
+ * Determine the user's home directory and return a connect
+ * string to HSQLDB that uses ~/.sqoop/ as the storage location
+ * for the metastore database.
+ */
+ private String getHomeDirFileConnectStr() {
+ String homeDir = System.getProperty("user.home");
+
+ File homeDirObj = new File(homeDir);
+ File sqoopDataDirObj = new File(homeDirObj, ".sqoop");
+ File databaseFileObj = new File(sqoopDataDirObj, "metastore.db");
+
+ String dbFileStr = databaseFileObj.toString();
+ return "jdbc:hsqldb:file:" + dbFileStr
+ + ";hsqldb.write_delay=false;shutdown=true";
+ }
+
+ @Override
+ /**
+ * Set the connection information to use the auto-inferred connection
+ * string.
+ */
+ public void open(Map<String, String> descriptor) throws IOException {
+ Configuration conf = getConf();
+ setMetastoreConnectStr(conf.get(AUTO_STORAGE_CONNECT_STRING_KEY,
+ getHomeDirFileConnectStr()));
+ setMetastoreUser(conf.get(AUTO_STORAGE_USER_KEY, DEFAULT_AUTO_USER));
+ setMetastorePassword(conf.get(AUTO_STORAGE_PASS_KEY,
+ DEFAULT_AUTO_PASSWORD));
+
+ init();
+ }
+}
+
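A minimal sketch, not part of the patch, showing how the auto-storage defaults can be overridden through the configuration keys defined above; the file path is a placeholder:

```java
import org.apache.hadoop.conf.Configuration;

import com.cloudera.sqoop.metastore.hsqldb.AutoHsqldbStorage;

public class AutoStorageConfigSketch {
  /** Build a Configuration that redirects the zero-conf metastore. */
  public static Configuration configure() {
    Configuration conf = new Configuration();
    // Leave autoconnect on, but point it at an explicit HSQLDB file
    // instead of ~/.sqoop/metastore.db.
    conf.setBoolean(AutoHsqldbStorage.AUTO_STORAGE_IS_ACTIVE_KEY, true);
    conf.set(AutoHsqldbStorage.AUTO_STORAGE_CONNECT_STRING_KEY,
        "jdbc:hsqldb:file:/var/lib/sqoop/metastore.db"
        + ";hsqldb.write_delay=false;shutdown=true"); // placeholder path
    conf.set(AutoHsqldbStorage.AUTO_STORAGE_USER_KEY, "SA");
    conf.set(AutoHsqldbStorage.AUTO_STORAGE_PASS_KEY, "");
    return conf;
  }
}
```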
diff --git a/src/java/com/cloudera/sqoop/metastore/hsqldb/HsqldbMetaStore.java b/src/java/com/cloudera/sqoop/metastore/hsqldb/HsqldbMetaStore.java
new file mode 100644
index 00000000..235695c7
--- /dev/null
+++ b/src/java/com/cloudera/sqoop/metastore/hsqldb/HsqldbMetaStore.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to Cloudera, Inc. under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Cloudera, Inc. licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package com.cloudera.sqoop.metastore.hsqldb;
+
+import java.io.File;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+import org.apache.hadoop.util.StringUtils;
+
+import org.hsqldb.Server;
+import org.hsqldb.ServerConstants;
+
+import com.cloudera.sqoop.SqoopOptions;
+
+import com.cloudera.sqoop.manager.HsqldbManager;
+
+/**
+ * Container for an HSQLDB-backed metastore.
+ */
+public class HsqldbMetaStore {
+
+ public static final Log LOG = LogFactory.getLog(
+ HsqldbMetaStore.class.getName());
+
+ /** Where on the local fs does the metastore put files? */
+ public static final String META_STORAGE_LOCATION_KEY =
+ "sqoop.metastore.server.location";
+
+ /**
+ * What port does the metastore listen on?
+ */
+ public static final String META_SERVER_PORT_KEY =
+ "sqoop.metastore.server.port";
+
+ /** Default to this port if unset. */
+ public static final int DEFAULT_PORT = 16000;
+
+ private int port;
+ private String fileLocation;
+ private Server server;
+ private Configuration conf;
+
+ public HsqldbMetaStore(Configuration config) {
+ this.conf = config;
+ init();
+ }
+
+ /**
+ * Determine the user's home directory and return a file path
+ * under this root where the shared metastore can be placed.
+ */
+ private String getHomeDirFilePath() {
+ String homeDir = System.getProperty("user.home");
+
+ File homeDirObj = new File(homeDir);
+ File sqoopDataDirObj = new File(homeDirObj, ".sqoop");
+ File databaseFileObj = new File(sqoopDataDirObj, "shared-metastore.db");
+
+ return databaseFileObj.toString();
+ }
+
+ private void init() {
+ if (null != server) {
+ LOG.debug("init(): server already exists.");
+ return;
+ }
+
+ fileLocation = conf.get(META_STORAGE_LOCATION_KEY, null);
+ if (null == fileLocation) {
+ fileLocation = getHomeDirFilePath();
+ LOG.warn("The location for metastore data has not been explicitly set. "
+ + "Placing shared metastore files in " + fileLocation);
+ }
+
+ this.port = conf.getInt(META_SERVER_PORT_KEY, DEFAULT_PORT);
+ }
+
+
+ public void start() {
+ try {
+ if (server != null) {
+ server.checkRunning(false);
+ }
+ } catch (RuntimeException re) {
+ LOG.info("Server is already started.");
+ return;
+ }
+
+ server = new Server();
+ server.setDatabasePath(0, "file:" + fileLocation);
+ server.setDatabaseName(0, "sqoop");
+ server.putPropertiesFromString("hsqldb.write_delay=false");
+ server.setPort(port);
+ server.setSilent(true);
+ server.setNoSystemExit(true);
+
+ server.start();
+ LOG.info("Server started on port " + port + " with protocol "
+ + server.getProtocol());
+ }
+
+ /**
+ * Blocks the current thread until the server is shut down.
+ */
+ public void waitForServer() {
+ while (true) {
+ int curState = server.getState();
+ if (curState == ServerConstants.SERVER_STATE_SHUTDOWN) {
+ LOG.info("Got shutdown notification");
+ break;
+ }
+
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException ie) {
+ LOG.info("Interrupted while blocking for server:"
+ + StringUtils.stringifyException(ie));
+ }
+ }
+ }
+
+ /**
+ * Connects to the server and instructs it to shutdown.
+ */
+ public void shutdown() {
+ // Send the SHUTDOWN command to the server via SQL.
+ SqoopOptions options = new SqoopOptions(conf);
+ options.setConnectString("jdbc:hsqldb:hsql://localhost:"
+ + port + "/sqoop");
+ options.setUsername("SA");
+ options.setPassword("");
+ HsqldbManager manager = new HsqldbManager(options);
+ Statement s = null;
+ try {
+ Connection c = manager.getConnection();
+ s = c.createStatement();
+ s.execute("SHUTDOWN");
+ } catch (SQLException sqlE) {
+ LOG.warn("Exception shutting down database: "
+ + StringUtils.stringifyException(sqlE));
+ } finally {
+ if (null != s) {
+ try {
+ s.close();
+ } catch (SQLException sqlE) {
+ LOG.warn("Error closing statement: " + sqlE);
+ }
+ }
+
+ try {
+ manager.close();
+ } catch (SQLException sqlE) {
+ LOG.warn("Error closing manager: " + sqlE);
+ }
+ }
+ }
+}
+
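A minimal sketch, not part of the patch, of the metastore server lifecycle: construct with a Configuration, start(), then block in waitForServer() until shutdown() (here wired to a JVM shutdown hook) issues the SHUTDOWN statement. The port and path values are placeholders:

```java
import org.apache.hadoop.conf.Configuration;

import com.cloudera.sqoop.metastore.hsqldb.HsqldbMetaStore;

public class MetaStoreServerSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt(HsqldbMetaStore.META_SERVER_PORT_KEY, 16000);
    conf.set(HsqldbMetaStore.META_STORAGE_LOCATION_KEY,
        "/tmp/sqoop-shared-metastore.db"); // placeholder path

    final HsqldbMetaStore metastore = new HsqldbMetaStore(conf);
    metastore.start();

    // Issue the SHUTDOWN statement when the JVM exits, which unblocks
    // waitForServer() below.
    Runtime.getRuntime().addShutdownHook(new Thread() {
      public void run() {
        metastore.shutdown();
      }
    });

    metastore.waitForServer();
  }
}
```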
diff --git a/src/java/com/cloudera/sqoop/metastore/hsqldb/HsqldbSessionStorage.java b/src/java/com/cloudera/sqoop/metastore/hsqldb/HsqldbSessionStorage.java
new file mode 100644
index 00000000..09d9da9f
--- /dev/null
+++ b/src/java/com/cloudera/sqoop/metastore/hsqldb/HsqldbSessionStorage.java
@@ -0,0 +1,796 @@
+/**
+ * Licensed to Cloudera, Inc. under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Cloudera, Inc. licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package com.cloudera.sqoop.metastore.hsqldb;
+
+import java.io.IOException;
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.conf.Configuration;
+
+import com.cloudera.sqoop.SqoopOptions;
+
+import com.cloudera.sqoop.metastore.SessionData;
+import com.cloudera.sqoop.metastore.SessionStorage;
+
+import com.cloudera.sqoop.tool.SqoopTool;
+
+/**
+ * SessionStorage implementation that uses an HSQLDB-backed database to
+ * hold session information.
+ */
+public class HsqldbSessionStorage extends SessionStorage {
+
+ public static final Log LOG = LogFactory.getLog(
+ HsqldbSessionStorage.class.getName());
+
+ /** descriptor key identifying the connect string for the metastore. */
+ public static final String META_CONNECT_KEY = "metastore.connect.string";
+
+ /** descriptor key identifying the username to use when connecting
+ * to the metastore.
+ */
+ public static final String META_USERNAME_KEY = "metastore.username";
+
+ /** descriptor key identifying the password to use when connecting
+ * to the metastore.
+ */
+ public static final String META_PASSWORD_KEY = "metastore.password";
+
+
+ /** Default name for the root metadata table in HSQLDB. */
+ private static final String DEFAULT_ROOT_TABLE_NAME = "SQOOP_ROOT";
+
+ /** Configuration key used to override root table name. */
+ public static final String ROOT_TABLE_NAME_KEY =
+ "sqoop.hsqldb.root.table.name";
+
+ /** root metadata table key used to define the current schema version. */
+ private static final String STORAGE_VERSION_KEY =
+ "sqoop.hsqldb.session.storage.version";
+
+ /** The current version number for the schema edition. */
+ private static final int CUR_STORAGE_VERSION = 0;
+
+ /** root metadata table key used to define the session table name. */
+ private static final String SESSION_TABLE_KEY =
+ "sqoop.hsqldb.session.info.table";
+
+ /** Default value for SESSION_TABLE_KEY. */
+ private static final String DEFAULT_SESSION_TABLE_NAME =
+ "SQOOP_SESSIONS";
+
+ /** Per-session key with propClass 'schema' that defines the set of
+ * properties valid to be defined for propClass 'SqoopOptions'. */
+ private static final String PROPERTY_SET_KEY =
+ "sqoop.property.set.id";
+
+ /** Current value for PROPERTY_SET_KEY. */
+ private static final String CUR_PROPERTY_SET_ID = "0";
+
+ // The following are values for propClass in the v0 schema which
+ // describe different aspects of the stored metadata.
+
+ /** Property class for properties about the stored data itself. */
+ private static final String PROPERTY_CLASS_SCHEMA = "schema";
+
+ /** Property class for properties that are loaded into SqoopOptions. */
+ private static final String PROPERTY_CLASS_SQOOP_OPTIONS = "SqoopOptions";
+
+ /** Property class for properties that are loaded into a Configuration. */
+ private static final String PROPERTY_CLASS_CONFIG = "config";
+
+ /**
+ * Per-session key with propClass 'schema' that specifies the SqoopTool
+ * to load.
+ */
+ private static final String SQOOP_TOOL_KEY = "sqoop.tool";
+
+
+ private String metastoreConnectStr;
+ private String metastoreUser;
+ private String metastorePassword;
+ private Connection connection;
+
+ protected Connection getConnection() {
+ return this.connection;
+ }
+
+ // After connection to the database and initialization of the
+ // schema, this holds the name of the session table.
+ private String sessionTableName;
+
+ protected void setMetastoreConnectStr(String connectStr) {
+ this.metastoreConnectStr = connectStr;
+ }
+
+ protected void setMetastoreUser(String user) {
+ this.metastoreUser = user;
+ }
+
+ protected void setMetastorePassword(String pass) {
+ this.metastorePassword = pass;
+ }
+
+ private static final String DB_DRIVER_CLASS = "org.hsqldb.jdbcDriver";
+
+ @Override
+ /**
+ * Initialize the connection to the database.
+ */
+ public void open(Map<String, String> descriptor) throws IOException {
+ setMetastoreConnectStr(descriptor.get(META_CONNECT_KEY));
+ setMetastoreUser(descriptor.get(META_USERNAME_KEY));
+ setMetastorePassword(descriptor.get(META_PASSWORD_KEY));
+
+ init();
+ }
+
+ protected void init() throws IOException {
+ try {
+ // Load/initialize the JDBC driver.
+ Class.forName(DB_DRIVER_CLASS);
+ } catch (ClassNotFoundException cnfe) {
+ throw new IOException("Could not load HSQLDB JDBC driver", cnfe);
+ }
+
+ try {
+ if (null == metastoreUser) {
+ this.connection = DriverManager.getConnection(metastoreConnectStr);
+ } else {
+ this.connection = DriverManager.getConnection(metastoreConnectStr,
+ metastoreUser, metastorePassword);
+ }
+
+ connection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
+ connection.setAutoCommit(false);
+
+ // Initialize the root schema.
+ if (!rootTableExists()) {
+ createRootTable();
+ }
+
+ // Check the schema version.
+ String curStorageVerStr = getRootProperty(STORAGE_VERSION_KEY, null);
+ int actualStorageVer = -1;
+ try {
+ actualStorageVer = Integer.valueOf(curStorageVerStr);
+ } catch (NumberFormatException nfe) {
+ LOG.warn("Could not interpret as a number: " + curStorageVerStr);
+ }
+ if (actualStorageVer != CUR_STORAGE_VERSION) {
+ LOG.error("Can not interpret metadata schema");
+ LOG.error("The metadata schema version is " + curStorageVerStr);
+ LOG.error("The highest version supported is " + CUR_STORAGE_VERSION);
+ LOG.error("To use this version of Sqoop, "
+ + "you must downgrade your metadata schema.");
+ throw new IOException("Invalid metadata version.");
+ }
+
+ // Initialize the versioned schema.
+ initV0Schema();
+ } catch (SQLException sqle) {
+ if (null != connection) {
+ try {
+ connection.rollback();
+ } catch (SQLException e2) {
+ LOG.warn("Error rolling back transaction in error handler: " + e2);
+ }
+ }
+
+ throw new IOException("Exception creating SQL connection", sqle);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (null != this.connection) {
+ try {
+ LOG.debug("Flushing current transaction");
+ this.connection.commit();
+ } catch (SQLException sqlE) {
+ throw new IOException("Exception committing connection", sqlE);
+ }
+
+ try {
+ LOG.debug("Closing connection");
+ this.connection.close();
+ } catch (SQLException sqlE) {
+ throw new IOException("Exception closing connection", sqlE);
+ } finally {
+ this.connection = null;
+ }
+ }
+ }
+
+ @Override
+ /** {@inheritDoc} */
+ public boolean canAccept(Map<String, String> descriptor) {
+ // We return true if the descriptor contains a connect string to find
+ // the database.
+ return descriptor.get(META_CONNECT_KEY) != null;
+ }
+
+ @Override
+ /** {@inheritDoc} */
+ public SessionData read(String sessionName) throws IOException {
+ try {
+ if (!sessionExists(sessionName)) {
+ LOG.error("Cannot restore session: " + sessionName);
+ LOG.error("(No such session)");
+ throw new IOException("Cannot restore missing session " + sessionName);
+ }
+
+ LOG.debug("Restoring session: " + sessionName);
+ Properties schemaProps = getV0Properties(sessionName,
+ PROPERTY_CLASS_SCHEMA);
+ Properties sqoopOptProps = getV0Properties(sessionName,
+ PROPERTY_CLASS_SQOOP_OPTIONS);
+ Properties configProps = getV0Properties(sessionName,
+ PROPERTY_CLASS_CONFIG);
+
+ // Check that we're not using a saved session from a previous
+ // version whose functionality has been deprecated.
+ String thisPropSetId = schemaProps.getProperty(PROPERTY_SET_KEY);
+ LOG.debug("System property set: " + CUR_PROPERTY_SET_ID);
+ LOG.debug("Stored property set: " + thisPropSetId);
+ if (!CUR_PROPERTY_SET_ID.equals(thisPropSetId)) {
+ LOG.warn("The property set present in this database was written by");
+ LOG.warn("an incompatible version of Sqoop. This may result in an");
+ LOG.warn("incomplete operation.");
+ // TODO(aaron): Should this fail out-right?
+ }
+
+ String toolName = schemaProps.getProperty(SQOOP_TOOL_KEY);
+ if (null == toolName) {
+ // Don't know what tool to create.
+ throw new IOException("Incomplete metadata; missing "
+ + SQOOP_TOOL_KEY);
+ }
+
+ SqoopTool tool = SqoopTool.getTool(toolName);
+ if (null == tool) {
+ throw new IOException("Error in session metadata: invalid tool "
+ + toolName);
+ }
+
+ Configuration conf = new Configuration();
+ for (Map.Entry