A Simple Application of the FileSystem API in Java


2014/8/21 1:22:45  beat_it_

Following along with a book, I wrote up some simple operations against HDFS using the Java FileSystem API. Here is the code:

class="java" name="code">package com.lyq.study.api;

import java.io.IOException;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;


import com.lyq.study.util.HBaseConfigUtils;

public class HDFS_API {
	
	// Upload a local file to HDFS and list the contents of the target directory
	public static void copyFile() throws IOException {
		FileSystem hdfs = FileSystem.get(HBaseConfigUtils.getHBaseConfig(0));
		Path src = new Path("D:\\data.txt");
		Path dst = new Path("/test/input");
		hdfs.copyFromLocalFile(src, dst);
		System.out.println("===========");
		FileStatus[] files = hdfs.listStatus(dst);
		for(FileStatus file:files){
			System.out.println(file.getPath());
		}
	}
	
	// Create a file on HDFS and write a string to it
	public static void createFile() throws IOException {
		byte[] buff = "hello world hadoop study hbase study hello world study hadoop hbase".getBytes();
		FileSystem hdfs = FileSystem.get(HBaseConfigUtils.getHBaseConfig(0));
		Path dst = new Path("/test/input/test.txt");
		FSDataOutputStream outputStream = hdfs.create(dst);
		outputStream.write(buff, 0, buff.length);
		outputStream.close(); // close the stream so the data is flushed and persisted
	}
	
	// Rename an HDFS file
	public static void renameFile() throws IOException {
		FileSystem hdfs = FileSystem.get(HBaseConfigUtils.getHBaseConfig(0));
		Path frpath = new Path("/test/input/test.txt");
		Path topath = new Path("/test/input/test1.txt");
		boolean rename = hdfs.rename(frpath, topath);
		System.out.println("rename ? "+rename);
	}
	
	// Delete an HDFS file
	public static void deleteFile() throws IOException {
		FileSystem hdfs = FileSystem.get(HBaseConfigUtils.getHBaseConfig(0));
		Path delpath = new Path("/test/input/test1.txt");
		boolean isdelete = hdfs.delete(delpath,false);
//		boolean isdelete = hdfs.delete(delpath, true); // recursive delete (for non-empty directories)
		System.out.println("isdelete ? "+isdelete);
	}
	
	// Print the last modification time of an HDFS file
	public static void getLTime() throws IOException {
		FileSystem hdfs = FileSystem.get(HBaseConfigUtils.getHBaseConfig(0));
		Path fpath = new Path("/test/input/a.txt");
		FileStatus filestatus = hdfs.getFileStatus(fpath);
		long modificationTime = filestatus.getModificationTime();
		System.out.println("Modifycation time is: "+modificationTime);
	}
	
	// Check whether an HDFS file exists
	public static void checkFile() throws IOException {
		FileSystem hdfs = FileSystem.get(HBaseConfigUtils.getHBaseConfig(0));
		Path fpath = new Path("/test/input/a.txt");
		boolean isExists = hdfs.exists(fpath);
		System.out.println("Exist ? "+isExists);
	}
	
	// Print the block locations of an HDFS file in the cluster
	public static void fileLoc() throws IOException {
		FileSystem hdfs = FileSystem.get(HBaseConfigUtils.getHBaseConfig(0));
		Path fpath = new Path("/test/input/a.txt");
		FileStatus filestatus = hdfs.getFileStatus(fpath);
		BlockLocation[] blkLocations = hdfs.getFileBlockLocations(filestatus, 0, filestatus.getLen());
		int blockLen = blkLocations.length;
		for(int i=0;i<blockLen;i++){
			String[] hosts = blkLocations[i].getHosts();
			System.out.println("block"+i+"location: "+hosts[i]);
		}
	}
	
	// List all datanodes in the HDFS cluster
	public static void getList() throws IOException {
		FileSystem fs = FileSystem.get(HBaseConfigUtils.getHBaseConfig(0));
		DistributedFileSystem hdfs = (DistributedFileSystem) fs; 
		DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();
		String[] names = new String[dataNodeStats.length];
		for(int i=0;i<dataNodeStats.length;i++){
			names[i] = dataNodeStats[i].getHostName();
			System.out.println("node"+i+"name: "+names[i]);
		}
	}
	
	public static void main(String[] args) throws IOException {
		copyFile();
//		createFile();
//		renameFile();
//		deleteFile();
//		getLTime();
//		checkFile();
//		fileLoc();
//		getList();
	}
}
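
For completeness, reading a file back out of HDFS is the one everyday operation the class above does not cover. The sketch below is a minimal addition under the same assumptions (the HBaseConfigUtils helper and the test.txt file written by createFile()); the method name is mine, and it needs two extra imports: org.apache.hadoop.fs.FSDataInputStream and org.apache.hadoop.io.IOUtils.

	// Read an HDFS file and copy its contents to stdout.
	// Hypothetical helper, not part of the original post.
	public static void readFile() throws IOException {
		FileSystem hdfs = FileSystem.get(HBaseConfigUtils.getHBaseConfig(0));
		Path fpath = new Path("/test/input/test.txt");
		FSDataInputStream inputStream = hdfs.open(fpath);
		try {
			// copy in 4096-byte chunks; false = do not close System.out afterwards
			IOUtils.copyBytes(inputStream, System.out, 4096, false);
		} finally {
			inputStream.close();
		}
	}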

package com.lyq.study.util;

import org.apache.hadoop.conf.Configuration;

public class HBaseConfigUtils {
	/**
	 * Build the Configuration used for HDFS/HBase access.
	 * @param flag cluster switch: 0 = standalone, 1 = cluster
	 * @return the populated Configuration
	 */
	public static Configuration getHBaseConfig(int flag) {
		Configuration conf = new Configuration();
		if (flag > 0) {
			// cluster
			conf.set("fs.defaultFS", "hdfs://master129:9000/");
			conf.set("mapreduce.framework.name", "local");
			conf.set("mapred.job.tracker", "master129:9001");
			conf.set("hbase.zookeeper.quorum", "master129,slave130,slave131,slave132");
		} else {
			// standalone
			conf.set("fs.defaultFS", "hdfs://ubuntu:9000/");
			conf.set("mapreduce.framework.name", "local");
			conf.set("mapred.job.tracker", "ubuntu:9001");
			conf.set("hbase.zookeeper.quorum", "ubuntu");
		}
		return conf;
	}
}
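
One caveat worth adding: none of the demo methods closes its FileSystem handle. For a short-lived test program this is usually harmless, since FileSystem.get() returns a cached, shared instance, but if explicit cleanup is wanted, a try-with-resources variant (Java 7+; the method name here is hypothetical) could look like this:

	// Variant of checkFile() with explicit cleanup; FileSystem implements Closeable.
	// Note: closing a cached FileSystem also invalidates it for other callers
	// that obtained it via FileSystem.get() with the same configuration.
	public static void checkFileWithCleanup() throws IOException {
		try (FileSystem hdfs = FileSystem.get(HBaseConfigUtils.getHBaseConfig(0))) {
			Path fpath = new Path("/test/input/a.txt");
			System.out.println("Exist ? " + hdfs.exists(fpath));
		}
	}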