hdfs节点内部磁盘balance,在hadoop3之后是支持的了
默认是开启的,参数dfs.disk.balancer.enabled
以下命令执行需要用hdfs用户
## 创建balance计划
hdfs diskbalancer -plan node1
##执行balance计划
hdfs diskbalancer -execute /system/diskbalancer/nodename.plan.json
##查看执行的情况
hdfs diskbalancer -query node1
## 取消执行计划
hdfs diskbalancer -cancel node1.plan.json
##查看balance执行的report
#hdfs diskbalancer -fs hdfs://namenode:8020 -report -node node1
附一个balance脚本
#!/bin/bash
# HDFS intra-node disk balancer helper.
# Usage: hdfs_disk_balance.sh <node>
# Must be run as the hdfs user; requires dfs.disk.balancer.enabled=true
# (the default in Hadoop 3+).
set -euo pipefail

balance_node=${1:-}
# Validate arguments BEFORE doing any work (the original ran kinit first).
if [[ -z "$balance_node" ]]; then
  echo "please add balance node. Like this: --- hdfs_disk_balance.sh node1" >&2
  exit 1
fi

# Bug fix: original used %h (abbreviated month name, e.g. "Jan"); %H is the hour.
balance_time=$(date +%Y-%m-%d-%H)
balance_path=$(cd "$(dirname "$0")" && pwd)
log_file="$balance_path/${balance_time}_${balance_node}_balance_info"

# Kerberos login for the hdfs service principal on this host.
kinit -kt /etc/hadoop/hdfs.keytab "hdfs/$(hostname -f)"

# Resolve the node's hostname from /etc/hosts (second column).
# -w avoids partial matches; head -n1 guards against duplicate entries.
# '|| true' keeps set -e/pipefail from aborting before we can report the error.
node_hostname=$(grep -w -- "$balance_node" /etc/hosts | awk '{print $2}' | head -n1 || true)
if [[ -z "$node_hostname" ]]; then
  echo "node '$balance_node' not found in /etc/hosts" >&2
  exit 1
fi

# Create the balance plan; the command logs "Writing plan to <path>.plan.json".
hdfs diskbalancer -plan "$node_hostname" >>"$log_file"
plan_json=$(grep -A1 'Writing plan to' "$log_file" | grep '\.json' || true)
if [[ -z "$plan_json" ]]; then
  echo "could not find plan json path in $log_file" >&2
  exit 1
fi

# Execute the plan, then append the report to the log file.
hdfs diskbalancer -execute "$plan_json"
hdfs diskbalancer -fs hdfs://namenode:8020 -report -node "$node_hostname" >>"$log_file"

# Check progress:  hdfs diskbalancer -query <node>
# Cancel a plan:   hdfs diskbalancer -cancel <node>.plan.json
参考链接
https://hadoop.apache.org/docs/r3.3.1/hadoop-project-dist/hadoop-hdfs/HDFSDiskbalancer.html