1:namenode+secondaryNameNode工作机制
2:datanode工作机制
3:HDFS中的通信(代理对象RPC)
下面用代码来实现基本的原理
1:服务端代码
package it.dawn.HDFSPra.RPC.server; import java.io.IOException; import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RPC.Builder;
import org.apache.hadoop.ipc.RPC.Server; import it.dawn.HDFSPra.RPC.protocol.RpcProtocol; /**
* @version 1.0
* @author Dawn
* @date 2019年4月29日17:10:25
* 模拟了一个rpc通信。该类是服务端
*/
public class RpcServer {

    /**
     * Entry point: assembles and starts an RPC server exposing
     * {@code RpcProtocol} on localhost:6666.
     */
    public static void main(String[] args) throws HadoopIllegalArgumentException, IOException {
        // Build the RPC server fluently: bind address and port, declare the
        // protocol interface, and supply the implementation instance that
        // will answer incoming calls.
        Server rpcServer = new RPC.Builder(new Configuration())
                .setBindAddress("localhost")
                .setPort(6666)
                .setProtocol(RpcProtocol.class)
                .setInstance(new MyRpcProtocol())
                .build();
        // Begin listening for client requests.
        rpcServer.start();
    }
}
2:代理对象接口
package it.dawn.HDFSPra.RPC.protocol;

/**
 * RPC protocol contract shared by the server and the client proxy.
 * Hadoop's RPC framework matches the two sides by this interface
 * together with the {@code versionID} constant.
 */
public interface RpcProtocol {

    // Protocol version id; client and server must agree on this value.
    // (Interface fields are implicitly public static final.)
    long versionID = 1L;

    /**
     * Returns the metadata description for the given path.
     *
     * @param path the file path to look up
     * @return a textual metadata summary for the path
     */
    String getMetaData(String path);
}
3:代理对象的实现
package it.dawn.HDFSPra.RPC.server;

import it.dawn.HDFSPra.RPC.protocol.RpcProtocol;

/**
 * Server-side implementation of {@link RpcProtocol}. Returns a canned
 * block-listing string for any requested path, standing in for a real
 * namenode metadata lookup in this demo.
 */
public class MyRpcProtocol implements RpcProtocol {

    @Override
    public String getMetaData(String path) {
        // Fixed sample payload; this runtime string must stay exactly as-is.
        String blockListing = ": 3 - {BLK_1,blk_2,BLK_3...}";
        return path + blockListing;
    }
}
4:客户端
package it.dawn.HDFSPra.RPC.client; import java.io.IOException;
import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC; import it.dawn.HDFSPra.RPC.protocol.RpcProtocol; /**
* @version 1.0
* @author Dawn
* @date 2019年4月29日17:10:25
* 模拟了一个rpc通信。该类是客户端
*/
public class Client {

    /**
     * Entry point: obtains an RPC proxy for {@code RpcProtocol}, invokes the
     * remote {@code getMetaData} call, prints the result, and releases the
     * proxy's connection resources.
     */
    public static void main(String[] args) throws IOException {
        // 1. Obtain the protocol proxy. Use the versionID declared on the
        //    interface rather than a duplicated magic literal, so the client
        //    cannot silently drift from the server's declared version.
        RpcProtocol protocol = RPC.getProxy(RpcProtocol.class, RpcProtocol.versionID,
                new InetSocketAddress("localhost", 6666), new Configuration());
        try {
            // 2. Send the request; this call travels over the wire to the server.
            String result = protocol.getMetaData("/dawnhahhaha");
            // 3. Show the metadata returned by the server.
            System.out.println(result);
        } finally {
            // Release the proxy's underlying connections (the original leaked them).
            RPC.stopProxy(protocol);
        }
    }
}