I. Deployment
1. Install ceph-radosgw (the RGW default port 7480 must be open)
cd ceph-deploy-admin
yum install -y ceph-radosgw
ceph-deploy rgw create ceph01
2. Change the default port 7480 to port 80
echo '[client.rgw.ceph01]' >>ceph.conf
echo 'rgw_frontends = "civetweb port=80"' >>ceph.conf
ceph-deploy --overwrite-conf config push ceph01 ceph02 ceph03 ceph04
systemctl restart ceph-radosgw.target
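A quick way to confirm the port change took effect is a minimal connectivity check from Python (assumes ceph01 resolves from the admin node): port 80 should now accept connections and the old default 7480 should not.
import socket

# After the restart, RGW should listen on 80 and no longer on 7480.
for port in (80, 7480):
    try:
        socket.create_connection(('ceph01', port), timeout=3).close()
        print 'port %d: open' % port
    except Exception as exc:
        print 'port %d: closed (%s)' % (port, exc)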
II. Usage
1. Using RGW via the S3 API
1) Create a user
radosgw-admin user create --uid ceph-s3-user --display-name "Ceph S3 User Demo"
2) View user information
radosgw-admin user info --uid ceph-s3-user
3) Install the dependency package
yum -y install python-boto
4) Grant admin capabilities to the user (optional)
radosgw-admin caps add --uid=ceph-s3-user --caps="users=*;buckets=*;metadata=*;usage=*;zone=*"
5) Demo code (see: https://docs.ceph.com/en/latest/radosgw/s3/python/)
import boto
import boto.s3.connection

access_key = '7WGXLK24XAGQ7HVXP1JJ'
secret_key = 'sU6xvokvYUuTvuNeihwDheyxExyRGjQwPRP0UbKK'

# create a connection to the RGW S3 endpoint
conn = boto.connect_s3(
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key,
    host='192.168.86.51',
    is_secure=False,  # plain HTTP, no SSL
    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)

# create a bucket
bucket = conn.create_bucket('ceph-s3-bucket')

# list all buckets
for bucket in conn.get_all_buckets():
    print "{name}\t{created}".format(
        name=bucket.name,
        created=bucket.creation_date,
    )
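Continuing from the demo above (conn is the connection created there), a minimal sketch of per-object operations; the object name hello.txt and its contents are placeholders for illustration:
# continues from the demo above: conn is the boto S3 connection
bucket = conn.get_bucket('ceph-s3-bucket')

# upload an object
key = bucket.new_key('hello.txt')
key.set_contents_from_string('Hello World!')

# list the objects in the bucket
for obj in bucket.list():
    print "{name}\t{size}".format(name=obj.name, size=obj.size)

# generate a signed download URL valid for one hour
url = key.generate_url(3600, query_auth=True, force_http=True)
print url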
2. Managing object storage with s3cmd
1) Install the command-line tool
yum -y install s3cmd
2) Configure s3cmd
s3cmd --configure
Access Key:7WGXLK24XAGQ7HVXP1JJ
Secret Key:sU6xvokvYUuTvuNeihwDheyxExyRGjQwPRP0UbKK
Default Region [US]:
S3 Endpoint: 192.168.86.51:80
DNS-style bucket+hostname: 192.168.86.51/%(bucket)s
Encryption password: (press Enter to skip)
Path to GPG program [/usr/bin/gpg]: (press Enter)
Use HTTPS protocol [Yes]: no
HTTP Proxy server name:
Test access with supplied credentials? [Y/n] Y
Save settings? [y/N] y
3) Bucket operations
List buckets:
s3cmd ls
Create a bucket (switch to v2 signatures first, otherwise the request may fail with a signature error):
sed -i 's/signature_v2 = False/signature_v2 = True/g' ~/.s3cfg
s3cmd mb s3://s3cmd-demo
Upload a file to the bucket:
s3cmd put /etc/fstab s3://s3cmd-demo/fstab-demo
Upload a directory:
s3cmd put /etc/ s3://s3cmd-demo/etc/ --recursive
List the bucket contents:
s3cmd ls s3://s3cmd-demo
Download a file:
s3cmd get s3://s3cmd-demo/etc/sysctl.conf /tmp/sysctl.conf
Delete a file:
s3cmd rm s3://s3cmd-demo/fstab-demo
Delete a directory:
s3cmd rm s3://s3cmd-demo/etc/ --recursive
Delete the bucket:
s3cmd rb s3://s3cmd-demo
List the objects stored in the RGW data pool:
rados -p default.rgw.buckets.data ls
3. Using RGW via the Swift API
# Create a subuser
radosgw-admin subuser create --uid ceph-s3-user --subuser ceph-s3-user:swift --access=full
# Generate a secret key for the subuser
radosgw-admin key create --subuser ceph-s3-user:swift --key-type=swift --gen-secret
# Install the Swift client
yum -y install python-setuptools python-pip
pip install --upgrade pip
pip install --upgrade python-swiftclient
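With the client installed, a minimal usage sketch via python-swiftclient; the swift secret below is a placeholder for the key generated above, and the /auth endpoint on the RGW node is assumed:
import swiftclient

# credentials of the subuser created above; replace the key with the
# swift secret printed by radosgw-admin
conn = swiftclient.Connection(
    user='ceph-s3-user:swift',
    key='<swift-secret-key>',
    authurl='http://192.168.86.51/auth',
)

# create a container and upload an object into it
conn.put_container('swift-demo')
conn.put_object('swift-demo', 'hello.txt', contents='Hello from swift')

# list the containers in the account
for container in conn.get_account()[1]:
    print container['name']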
III. High Availability Design
Planned high-availability VIP: 192.168.86.88
Node | IP | Role |
---|---|---|
ceph01 | 192.168.86.51 | rgw01 |
ceph02 | 192.168.86.52 | rgw02 |
haproxy01 | 192.168.86.7 | haproxy + keepalived |
haproxy02 | 192.168.86.100 | haproxy + keepalived |
Scale out the RGW cluster
1. Install ceph-radosgw on ceph02 (the RGW default port 7480 must be open)
cd ceph-deploy-admin
ssh ceph02 "yum install -y ceph-radosgw"
ceph-deploy rgw create ceph02
2. Change the default port 7480 to port 80
echo '[client.rgw.ceph02]' >>ceph.conf
echo 'rgw_frontends = "civetweb port=80"' >>ceph.conf
ceph-deploy --overwrite-conf config push ceph01 ceph02 ceph03 ceph04
systemctl restart ceph-radosgw.target
ssh ceph02 "systemctl restart ceph-radosgw.target"
3. Verify
curl ceph01
curl ceph02
4. Deploy keepalived on haproxy01 and haproxy02
yum -y install keepalived
systemctl start keepalived.service
systemctl enable keepalived.service
5. Edit the keepalived configuration file
cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
# vrrp_strict    # keep this commented out, otherwise the VIP ports may be unreachable
vrrp_garp_interval 0
vrrp_gna_interval 0
}
# Failover check: if haproxy goes down, this host's priority drops by 2 so the backup becomes master
vrrp_script chk_haproxy {
script "killall -0 haproxy"
interval 2
weight -2
}
vrrp_instance RGW {
state MASTER #use BACKUP on the backup node
interface eth0 #the interface that carries the VIP; it must actually exist
virtual_router_id 51
priority 100 #99 on the backup node
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.86.88/24 #the VIP; it is added automatically to the interface selected above
}
track_script {
chk_haproxy #the check script defined above
}
}
Restart keepalived:
systemctl restart keepalived.service
Configure haproxy on haproxy01 and haproxy02
1. Install haproxy
yum -y install haproxy
systemctl start haproxy
systemctl enable haproxy
2. haproxy configuration file
cat /etc/haproxy/haproxy.cfg
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
stats socket /var/lib/haproxy/stats
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
frontend http_web *:80 #frontend name and listening port
mode http
default_backend rgw #name of the default backend
backend rgw #must match the default_backend name in the frontend
balance roundrobin
mode http
server ceph01 192.168.86.51:80 #backend RGW address
server ceph02 192.168.86.52:80 #backend RGW address
3. Verify keepalived
curl 192.168.86.88
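Beyond a one-shot curl, a small polling sketch with boto against the VIP (same demo credentials as above) can show that requests keep succeeding while keepalived or haproxy is stopped on the master node:
import time
import boto
import boto.s3.connection

access_key = '7WGXLK24XAGQ7HVXP1JJ'
secret_key = 'sU6xvokvYUuTvuNeihwDheyxExyRGjQwPRP0UbKK'

# connect through the VIP instead of a single RGW node
conn = boto.connect_s3(
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key,
    host='192.168.86.88',
    port=80,
    is_secure=False,
    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)

# poll once per second; the listing should keep working during failover
for _ in range(60):
    try:
        names = [b.name for b in conn.get_all_buckets()]
        print 'OK  buckets: %s' % names
    except Exception as exc:
        print 'FAILED: %s' % exc
    time.sleep(1)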
4. Verify with s3cmd by changing the host_base and host_bucket fields in its config file to the VIP address
vi ~/.s3cfg
host_base = 192.168.86.88:80
host_bucket = 192.168.86.88/%(bucket)s
s3cmd ls
s3cmd mb s3://test-bucket