
HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang via Colin P. McCabe)
Colin Patrick McCabe committed Jun 22, 2015
1 parent 5590e91 commit 7b424f9
Showing 3 changed files with 48 additions and 31 deletions.
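A note on the change for readers of this page: the HDFS-7929 approach preserved each pre-upgrade edit log by replaying it op-by-op into a fresh file (the removed loop in NNUpgradeUtil below), which costs I/O proportional to the total size of the logs and could blow past startup timeouts. A hard link only adds a second directory entry for an existing inode, a constant-time metadata operation. A minimal sketch of the difference, using plain java.nio.file and made-up file names rather than the real HDFS paths:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class LinkVsCopy {
  public static void main(String[] args) throws IOException {
    // Hypothetical stand-ins for an edit log in the old storage dir
    // and its preserved name under current/.
    Path prev = Paths.get("previous.tmp", "edits_0000001-0000100");
    Path cur = Paths.get("current", "edits_0000001-0000100");

    // Copying (a simplified stand-in for the removed replay loop)
    // moves every byte: O(file size) in I/O and time.
    Files.copy(prev, cur);
    Files.delete(cur);

    // Hard-linking adds a directory entry for the same inode: O(1),
    // independent of file size. Both names now share the on-disk data.
    Files.createLink(cur, prev);
  }
}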
3 changes: 3 additions & 0 deletions hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -971,6 +971,9 @@ Release 2.7.1 - UNRELEASED
    HDFS-7164. Feature documentation for HDFS-6581. (Arpit Agarwal)

  OPTIMIZATIONS
+   HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using
+   hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang
+   via Colin P. McCabe)

  BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
@@ -20,6 +20,7 @@
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
+import java.nio.file.Files;
import java.util.List;

import org.apache.commons.logging.Log;
@@ -127,23 +128,8 @@ public boolean accept(File dir, String name) {

    for (String s : fileNameList) {
      File prevFile = new File(tmpDir, s);
-     Preconditions.checkState(prevFile.canRead(),
-         "Edits log file " + s + " is not readable.");
      File newFile = new File(curDir, prevFile.getName());
-     Preconditions.checkState(newFile.createNewFile(),
-         "Cannot create new edits log file in " + curDir);
-     EditLogFileInputStream in = new EditLogFileInputStream(prevFile);
-     EditLogFileOutputStream out =
-         new EditLogFileOutputStream(conf, newFile, 512*1024);
-     FSEditLogOp logOp = in.nextValidOp();
-     while (logOp != null) {
-       out.write(logOp);
-       logOp = in.nextOp();
-     }
-     out.setReadyToFlush();
-     out.flushAndSync(true);
-     out.close();
-     in.close();
+     Files.createLink(newFile.toPath(), prevFile.toPath());
    }
  }

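Two details of Files.createLink are worth noting here (a sketch under stated assumptions, not part of the commit): it requires both paths to be on the same filesystem, which holds because the tmp directory and current/ sit inside the same storage directory, and it refuses to overwrite an existing file, throwing FileAlreadyExistsException, which is why the old createNewFile() precondition is dropped rather than kept. A hypothetical helper showing the same call in isolation:

import java.io.File;
import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;

class PreserveByLink {
  // Mirrors the loop above: link one old edit log into curDir
  // instead of copying its contents. Names are illustrative.
  static void preserve(File prevFile, File curDir) throws IOException {
    File newFile = new File(curDir, prevFile.getName());
    try {
      // One inode, two names: deleting prevFile later (e.g. on
      // finalize) leaves newFile's data intact.
      Files.createLink(newFile.toPath(), prevFile.toPath());
    } catch (FileAlreadyExistsException e) {
      // createLink never overwrites, so a prior createNewFile()
      // guard would make this call fail instead of helping.
      throw new IOException("edit log already preserved: " + newFile, e);
    }
  }
}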
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -30,13 +30,16 @@
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.LinkedList;
import java.nio.file.Files;
import java.util.List;
import java.util.regex.Pattern;

import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -45,7 +48,11 @@
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
@@ -54,6 +61,8 @@
import org.junit.Ignore;
import org.junit.Test;

import static org.apache.hadoop.hdfs.inotify.Event.CreateEvent;

import com.google.common.base.Charsets;
import com.google.common.base.Joiner;

@@ -466,31 +475,50 @@ public void testPreserveEditLogs() throws Exception {
log("Normal NameNode upgrade", 1);
File[] created =
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
List<String> beforeUpgrade = new LinkedList<>();
for (final File createdDir : created) {
List<String> fileNameList =
IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
beforeUpgrade.addAll(fileNameList);
for (String fileName : fileNameList) {
String tmpFileName = fileName + ".tmp";
File existingFile = new File(createdDir, fileName);
File tmpFile = new File(createdDir, tmpFileName);
Files.move(existingFile.toPath(), tmpFile.toPath());
File newFile = new File(createdDir, fileName);
Preconditions.checkState(newFile.createNewFile(),
"Cannot create new edits log file in " + createdDir);
EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
HdfsServerConstants.INVALID_TXID, HdfsServerConstants.INVALID_TXID,
false);
EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
(int)tmpFile.length());
out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1);
FSEditLogOp logOp = in.readOp();
while (logOp != null) {
out.write(logOp);
logOp = in.readOp();
}
out.setReadyToFlush();
out.flushAndSync(true);
out.close();
Files.delete(tmpFile.toPath());
}
}

cluster = createCluster();

List<String> afterUpgrade = new LinkedList<>();
for (final File createdDir : created) {
List<String> fileNameList =
IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
afterUpgrade.addAll(fileNameList);
}

for (String s : beforeUpgrade) {
assertTrue(afterUpgrade.contains(s));
}

DFSInotifyEventInputStream ieis =
cluster.getFileSystem().getInotifyEventStream(0);
EventBatch batch = ieis.poll();
Event[] events = batch.getEvents();
assertTrue("Should be able to get transactions before the upgrade.",
events.length > 0);
assertEquals(events[0].getEventType(), Event.EventType.CREATE);
assertEquals(((CreateEvent) events[0]).getPath(), "/TestUpgrade");
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}

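Why the test rewrites each generated edit log with CURRENT_LAYOUT_VERSION + 1 before upgrading: NameNode layout versions are negative integers that decrease each time an on-disk feature is added, so adding 1 yields the next-older layout. Rewriting the logs under that version makes the storage directory look like it was produced by an earlier release, which is exactly the input an upgrade must preserve and still be able to read. A toy illustration of the ordering; the concrete value is made up, the real constant lives in NameNodeLayoutVersion:

class LayoutVersionOrdering {
  public static void main(String[] args) {
    // Hypothetical value: layout versions are negative and grow more
    // negative with every new on-disk format feature.
    int current = -63;
    int older = current + 1; // one step older than current

    // Older layouts therefore compare as numerically greater.
    assert older > current;
    System.out.println("current=" + current + ", older=" + older);
  }
}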
-  private static enum EditLogsFilter implements FilenameFilter {
+  private enum EditLogsFilter implements FilenameFilter {
    INSTANCE;

    @Override

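Finally, the inotify assertions at the end of the test tie back to HDFS-7929 itself: the event stream is served by decoding edit logs, so polling it after the upgrade only works if the preserved logs are intact and readable. A minimal, hypothetical consumer of the same public API (the class and method names EditLogTail/drain are invented; the HDFS types are the ones used in the test):

import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

class EditLogTail {
  // Print every event currently readable from the edit logs,
  // starting from transaction id 0 (the oldest retained log).
  static void drain(DistributedFileSystem fs) throws Exception {
    DFSInotifyEventInputStream stream = fs.getInotifyEventStream(0);
    EventBatch batch;
    while ((batch = stream.poll()) != null) { // null: nothing buffered now
      for (Event e : batch.getEvents()) {
        System.out.println(batch.getTxid() + ": " + e.getEventType());
      }
    }
  }
}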