Mirror of https://github.com/apache/sqoop.git
SQOOP-225. Checkstyle module for detecting trailing white spaces.
This patch adds a Checkstyle module to detect trailing white spaces. It also removes the existing instances of trailing white space in the code.

From: Ahmed Radwan <ahmed@cloudera.com>
git-svn-id: https://svn.apache.org/repos/asf/incubator/sqoop/trunk@1150046 13f79535-47bb-0310-9956-ffa450edef68
Parent: fe9c0666b9
Commit: 5eaafd3243
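What the new rule enforces, sketched as a stand-alone check (illustrative only, not part of the patch: the class name and command-line handling below are assumptions, and the patch itself relies on Checkstyle's built-in Regexp check rather than custom code):

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.regex.Pattern;

// Minimal equivalent of the new rule: flag any line matching "[ \t]+$".
public class TrailingWhitespaceScanner {
  private static final Pattern TRAILING_WS = Pattern.compile("[ \\t]+$");

  public static void main(String[] args) throws IOException {
    int violations = 0;
    for (String path : args) {
      BufferedReader reader = new BufferedReader(new FileReader(path));
      try {
        String line;
        int lineNo = 0;
        while ((line = reader.readLine()) != null) {
          lineNo++;
          if (TRAILING_WS.matcher(line).find()) {
            System.out.println(path + ":" + lineNo + ": Trailing whitespace");
            violations++;
          }
        }
      } finally {
        reader.close();
      }
    }
    if (violations > 0) {
      System.exit(1); // non-zero exit if any violations were found
    }
  }
}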
Every remaining hunk below, apart from the new Checkstyle rule itself, only removes trailing whitespace: each changed line is replaced by the same line with the trailing spaces or tabs stripped. In the main sources the cleanup touches ConnFactory (instantiateFactories, the factory-class-name constants, loadManagersFromConfDir), the compression option accessors (shouldUseCompression, setUseCompression, getCompressionCodec, setCompressionCodec), and the option-group iterator() and merge() methods.
Also HBasePutProcessor and the SQL-to-Hive type mapping (toHiveType).
Also LobFile throughout: the header constants, LobFileHeader, MetaBlock, the IndexSegment write/readFields path, IndexTableEntry, V0Writer, V0Reader, the record-start-mark search (findRecordStartMark, searchForRecord), next(), and open().
Also LobReaderCache and the FIFO creation code (create(int permissions)).
Also DelimiterSet, FieldMappable, and LargeObjectLoader (getRelativePath).
Also RecordParser (ParseError, parseRecord, the enclosed-escape handling, isEnclosingRequired) and the abstract record class (loadLargeObjects, write(PreparedStatement, int), getFieldMap).
Also the MapReduce output plumbing: AsyncSqlOutputFormat, AsyncSqlRecordWriter, CombineShimRecordReader, and DelegatingOutputFormat.
Also the job setup and mappers: configureOutputFormat (compression codec wiring), configureInputFormat, MergeMapperBase, MergeRecord (isNewRecord, setNewRecord), MergeTextMapper, the mysqlimport export mapper, and MySQLTextExportMapper.
Also OracleExportOutputFormat, UpdateOutputFormat, DataDrivenDBRecordReader, and OracleDBRecordReader (getSelectQuery).
Also the metastore: JobStorage, AutoHsqldbStorage, and HsqldbJobStorage (the connect-string key, the property-set id, the property classes, and createJobTable).
Also code generation and compilation (generateSetField, the compile()/moveFile path), the class-naming helper (getShortClassForTable), and the compression arguments in applyOptions.
Also the tools: the job tool (configureChildTool, showJob, printHelp), the merge tool options, MetastoreTool, plugin loading (loadPluginsFromConfDir), ToolDesc, and the temporary-path helper (getFileExtension).
Also AsyncSink and Executor (exec).
Also the export and LobFile stress tests (runExport, LobFileStressTest, checkBigRecord, testBigFile).
The new rule added to the Checkstyle configuration:

@@ -131,6 +131,12 @@
     <property name="tokens" value="COMMA, SEMI"/>
   </module>
 
+  <module name="Regexp">
+    <property name="format" value="[ \t]+$"/>
+    <property name="illegalPattern" value="true"/>
+    <property name="message" value="Trailing whitespace"/>
+  </module>
+
   <!-- Modifier Checks -->
   <!-- See http://checkstyle.sf.net/config_modifiers.html -->
   <module name="ModifierOrder"/>
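In Checkstyle's Regexp check, setting illegalPattern to true makes every match of the format expression a reported violation (rather than a required pattern), so any line ending in a space or tab is now flagged with the message "Trailing whitespace". Leaning on the stock Regexp check keeps the rule a few lines of configuration; no custom check class is needed.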
The same trailing-whitespace cleanup runs through the test sources: TestAppendUtils, the SequenceFile and text compression tests, a mock connection manager (listDatabases, listTables), the update-export tests (populateDatabase, createUpdateFiles, testSubsetUpdate2), TestTargetDir, and the Hive import test (runImportTest).
Also the LOB file tests (verifyClobFile, runLineAndRecordTest, testSeekToRecord), testSplittingTextFile, the external CLOB reference test (doExternalTest), TestFieldFormatter, and the record parser tests.
Also OracleManagerTest, TestDataDrivenDBInputFormat, the metastore job test (testCreateDeleteJob), verifyExport, ManagerCompatTestCase and its type-mapping helpers, a mock ResultSet, the parse-methods test harness mapper, SeqFileReader, and the tool-plugin test (FooTool).