diff --git a/src/java/org/apache/sqoop/mapreduce/DataDrivenImportJob.java b/src/java/org/apache/sqoop/mapreduce/DataDrivenImportJob.java
index 708da5b5..bcf66111 100644
--- a/src/java/org/apache/sqoop/mapreduce/DataDrivenImportJob.java
+++ b/src/java/org/apache/sqoop/mapreduce/DataDrivenImportJob.java
@@ -103,7 +103,9 @@ protected void configureMapper(Job job, String tableName,
       // Parquet data records. The import will fail, if schema is invalid.
       Schema schema = generateAvroSchema(tableName);
       String uri = getKiteUri(conf, tableName);
-      ParquetJob.configureImportJob(conf, schema, uri, options.isAppendMode());
+      boolean reuseExistingDataset = options.isAppendMode() ||
+          (options.doHiveImport() && options.doOverwriteHiveTable());
+      ParquetJob.configureImportJob(conf, schema, uri, reuseExistingDataset);
     }
 
     job.setMapperClass(getMapperClass());
diff --git a/src/java/org/apache/sqoop/mapreduce/ParquetJob.java b/src/java/org/apache/sqoop/mapreduce/ParquetJob.java
index bea74c3a..85d88ffb 100644
--- a/src/java/org/apache/sqoop/mapreduce/ParquetJob.java
+++ b/src/java/org/apache/sqoop/mapreduce/ParquetJob.java
@@ -71,9 +71,9 @@ public static CompressionType getCompressionType(Configuration conf) {
    * {@link org.apache.avro.generic.GenericRecord}.
    */
   public static void configureImportJob(Configuration conf, Schema schema,
-      String uri, boolean doAppend) throws IOException {
+      String uri, boolean reuseExistingDataset) throws IOException {
     Dataset dataset;
-    if (reuseExistingDataset) {
+    if (reuseExistingDataset) {
       try {
         dataset = Datasets.load(uri);
       } catch (DatasetNotFoundException ex) {
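
For context, the patched configureImportJob relies on the Kite SDK's load-or-create pattern: when reuseExistingDataset is true (append mode, or now also a Hive import with --hive-overwrite), the dataset already registered at the target URI is loaded and reused rather than created from scratch. Below is a minimal standalone sketch of that pattern, assuming the Kite SDK (org.kitesdk.data) is on the classpath; the class name KiteDatasetSketch, the helper loadOrCreate, and the descriptor setup are hypothetical illustrations, not copies of the Sqoop source.

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.kitesdk.data.Dataset;
import org.kitesdk.data.DatasetDescriptor;
import org.kitesdk.data.DatasetNotFoundException;
import org.kitesdk.data.Datasets;
import org.kitesdk.data.Formats;

final class KiteDatasetSketch {

  /**
   * Load the Kite dataset at the given URI when the caller wants to
   * reuse an existing dataset; otherwise (or when nothing exists at
   * the URI yet) create a fresh Parquet dataset with the given schema.
   */
  static Dataset<GenericRecord> loadOrCreate(String uri, Schema schema,
      boolean reuseExistingDataset) {
    if (reuseExistingDataset) {
      try {
        // Reuse the dataset (and its metadata) already at the target URI.
        return Datasets.load(uri);
      } catch (DatasetNotFoundException ex) {
        // Nothing registered there yet; fall through and create it below.
      }
    }
    DatasetDescriptor descriptor = new DatasetDescriptor.Builder()
        .schema(schema)
        .format(Formats.PARQUET)
        .build();
    return Datasets.create(uri, descriptor);
  }
}

The point of widening the reuse condition in the patch is presumably that a --hive-overwrite import now takes the same load path as --append: Datasets.load succeeds against the existing dataset instead of the job attempting a create that would collide with it.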