在部署OpenStack前必须先部署Ceph,因为部署OpenStack时需要指定Ceph中的Pool存储池,用来定义各组件的数据存放在哪里;

Ceph搭建

主机规划

主机名 IP地址 角色 磁盘规划
node1 172.16.1.14 deploy,mon,mds,mgr,osd sda7-sda18(journal,12*20G),sdb-sdm(osd,12*8T)
node2 172.16.1.15 osd sda7-sda18(journal,12*20G),sdb-sdm(osd,12*8T)
node3 172.16.1.16 osd sda7-sda18(journal,12*20G),sdb-sdm(osd,12*8T)

配置Yum源

# ps:所有主机执行
cd /etc/yum.repos.d
mkdir bak
mv CentOS-* bak
cp bak/CentOS-Base.repo ./
sed -i 's/#baseurl=/baseurl=/g' CentOS*.repo
sed -i 's/mirrorlist=/#mirrorlist=/g' CentOS*.repo
sed -i 's/mirror.centos.org/mirrors.aliyun.com/g' CentOS*.repo

yum clean all && yum makecache
yum -y update

配置Ceph源

# ps:所有主机执行
# 设置ceph源
[root@node2 /]# cd /etc/yum.repos.d/
[root@node2 yum.repos.d]# vi ceph.repo
[Ceph-SRPMS]
name=Ceph SRPMS packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-jewel/el7/SRPMS/
enabled=1
gpgcheck=0
type=rpm-md

[Ceph-aarch64]
name=Ceph aarch64 packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-jewel/el7/aarch64/
enabled=1
gpgcheck=0
type=rpm-md

[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-jewel/el7/noarch/
enabled=1
gpgcheck=0
type=rpm-md

[Ceph-x86_64]
name=Ceph x86_64 packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-jewel/el7/x86_64/
enabled=1
gpgcheck=0
type=rpm-md

配置epel源

# 所有主机执行
yum -y install epel-release

sed -i 's/metalink=/#metalink=/g' /etc/yum.repos.d/epel*.repo
sed -i 's/#baseurl=/baseurl=/g' /etc/yum.repos.d/epel*.repo
sed -i 's/download.fedoraproject.org\/pub/mirrors.aliyun.com/g' /etc/yum.repos.d/epel*.repo

关闭防火墙与selinux

# 所有主机执行
systemctl disable firewalld && systemctl stop firewalld

sed -i 's/SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config && setenforce 0

时钟同步

# 所有主机执行
# 安装ntpdate
yum -y install ntpdate

# 设置任务定时器
crontab -e
* * * * * /sbin/ntpdate ntp.aliyun.com > /dev/null 2>&1

ntpdate ntp.aliyun.com

安装常用软件包

# 所有主机执行
yum -y install net-tools wget python-devel libffi-devel gcc openssl-devel libselinux-python yum-utils python-setuptools selinux-policy*

设置pip源

deploy节点

mkdir -p ~/.pip/

cat > ~/.pip/pip.conf << EOF
[global]
index-url = http://mirrors.aliyun.com/pypi/simple
trusted-host = mirrors.aliyun.com
EOF

yum -y install python-pip
pip install -U pip

设置免密登录

deploy主机执行

yum -y install openssh-clients

ssh-keygen
ssh-copy-id -i root@172.16.1.14
ssh-copy-id -i root@172.16.1.15
ssh-copy-id -i root@172.16.1.16
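# 一个简单的验证示意(假设上面的免密配置已完成,IP按实际环境替换):
for ip in 172.16.1.14 172.16.1.15 172.16.1.16; do ssh root@$ip hostname; done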

修改hosts文件

cat >> /etc/hosts << EOF
172.16.1.14 node1
172.16.1.15 node2
172.16.1.16 node3
EOF

scp /etc/hosts node2:/etc
scp /etc/hosts node3:/etc

升级selinux-policy

# 注:如果不升级selinux-policy,安装ceph软件时会出错
# ps!!!:所有主机执行
mkdir -p /data/pkgs
cd /data/pkgs

wget http://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/selinux-policy-3.13.1-252.el7_7.6.noarch.rpm
wget http://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/selinux-policy-devel-3.13.1-252.el7_7.6.noarch.rpm
wget http://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/selinux-policy-doc-3.13.1-252.el7_7.6.noarch.rpm
wget http://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/selinux-policy-minimum-3.13.1-252.el7_7.6.noarch.rpm
wget http://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/selinux-policy-mls-3.13.1-252.el7_7.6.noarch.rpm
wget http://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/selinux-policy-sandbox-3.13.1-252.el7_7.6.noarch.rpm
wget http://mirrors.aliyun.com/centos/7/updates/x86_64/Packages/selinux-policy-targeted-3.13.1-252.el7_7.6.noarch.rpm

yum update *.rpm -y

# 通过环境变量为后续的ceph-deploy install指定国内的ceph源与key
export CEPH_DEPLOY_REPO_URL=http://mirrors.aliyun.com/ceph/rpm-mimic/el7
export CEPH_DEPLOY_GPG_URL=http://mirrors.aliyun.com/ceph/keys/release.asc

yum install -y selinux-policy-devel-3.13.1-252.el7_7.6.noarch.rpm
yum install -y selinux-policy-doc-3.13.1-252.el7_7.6.noarch.rpm
yum install -y selinux-policy-minimum-3.13.1-252.el7_7.6.noarch.rpm
yum install -y selinux-policy-mls-3.13.1-252.el7_7.6.noarch.rpm
yum install -y selinux-policy-sandbox-3.13.1-252.el7_7.6.noarch.rpm
yum install -y selinux-policy-targeted-3.13.1-252.el7_7.6.noarch.rpm

安装Ceph-deploy

deploy节点执行

# 创建ceph目录
mkdir -p /etc/ceph
cd /etc/ceph
# 安装ceph-deploy
yum -y install ceph-deploy
# 查看ceph-deploy版本
ceph-deploy --version

安装ceph软件

# 所有节点执行
pip uninstall urllib3
yum -y install librados2
yum -y install python-urllib3
yum -y install ceph
yum -y install ceph-radosgw

# 在node1(deploy节点)上执行,先cd /etc/ceph
ceph-deploy install node1 node2 node3

创建集群(monitor)

# 在node1(deploy节点)上执行
cd /etc/ceph
ceph-deploy new node1 node2 node3
# 查看生成的配置文件
more ceph.conf

初始化密钥

# 在node1(deploy节点)上执行
cd /etc/ceph
ceph-deploy mon create-initial
# 检查状态
ceph -s
netstat -lnpt | grep 6789

# 注:如果有多台monitor,需要将ceph.client.admin.keyring传到另外两台monitor上,另外两台monitor才可以使用ceph -s
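# 一个最小的分发示意(假设另外两台monitor为node2、node3,且deploy节点已配置免密登录):
scp /etc/ceph/ceph.client.admin.keyring node2:/etc/ceph/
scp /etc/ceph/ceph.client.admin.keyring node3:/etc/ceph/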

创建OSD

# 使用fdisk(或parted)在/dev/sda上准备12个20G的分区(sda7-sda18)
# 作为日志盘使用,共划分12*20G=240G的空间
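# 下面是一个批量分区的思路示意(仅供参考):假设/dev/sda为GPT分区表、已存在sda1-sda6,
# 且剩余空间从100GiB开始;实际起止位置请先用 parted /dev/sda print free 确认后再调整
for i in $(seq 0 11); do
    start=$((100 + i * 20))
    end=$((start + 20))
    parted -s /dev/sda mkpart journal$((i + 7)) ${start}GiB ${end}GiB
done
partprobe /dev/sda
lsblk /dev/sda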

# 创建OSD命令如下:
parted /dev/sdb mklabel gpt -s
parted /dev/sdc mklabel gpt -s
parted /dev/sdd mklabel gpt -s
parted /dev/sde mklabel gpt -s
parted /dev/sdf mklabel gpt -s
parted /dev/sdg mklabel gpt -s
parted /dev/sdh mklabel gpt -s
parted /dev/sdi mklabel gpt -s
parted /dev/sdj mklabel gpt -s
parted /dev/sdk mklabel gpt -s
parted /dev/sdl mklabel gpt -s
parted /dev/sdm mklabel gpt -s

# 在node1执行:
# 初始化各节点磁盘
cd /etc/ceph
ceph-deploy disk zap node1 /dev/sdb
ceph-deploy disk zap node1 /dev/sdc
ceph-deploy disk zap node1 /dev/sdd
ceph-deploy disk zap node1 /dev/sde
ceph-deploy disk zap node1 /dev/sdf
ceph-deploy disk zap node1 /dev/sdg
ceph-deploy disk zap node1 /dev/sdh
ceph-deploy disk zap node1 /dev/sdi
ceph-deploy disk zap node1 /dev/sdj
ceph-deploy disk zap node1 /dev/sdk
ceph-deploy disk zap node1 /dev/sdl
ceph-deploy disk zap node1 /dev/sdm

ceph-deploy disk zap node2 /dev/sdb
ceph-deploy disk zap node2 /dev/sdc
ceph-deploy disk zap node2 /dev/sdd
ceph-deploy disk zap node2 /dev/sde
ceph-deploy disk zap node2 /dev/sdf
ceph-deploy disk zap node2 /dev/sdg
ceph-deploy disk zap node2 /dev/sdh
ceph-deploy disk zap node2 /dev/sdi
ceph-deploy disk zap node2 /dev/sdj
ceph-deploy disk zap node2 /dev/sdk
ceph-deploy disk zap node2 /dev/sdl
ceph-deploy disk zap node2 /dev/sdm

ceph-deploy disk zap node3 /dev/sdb
ceph-deploy disk zap node3 /dev/sdc
ceph-deploy disk zap node3 /dev/sdd
ceph-deploy disk zap node3 /dev/sde
ceph-deploy disk zap node3 /dev/sdf
ceph-deploy disk zap node3 /dev/sdg
ceph-deploy disk zap node3 /dev/sdh
ceph-deploy disk zap node3 /dev/sdi
ceph-deploy disk zap node3 /dev/sdj
ceph-deploy disk zap node3 /dev/sdk
ceph-deploy disk zap node3 /dev/sdl
ceph-deploy disk zap node3 /dev/sdm

# 创建OSD:数据盘整盘格式化为filestore,并将journal指向系统盘上对应的分区
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdb --journal /dev/sda7 node1
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdc --journal /dev/sda8 node1
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdd --journal /dev/sda9 node1
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sde --journal /dev/sda10 node1
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdf --journal /dev/sda11 node1
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdg --journal /dev/sda12 node1
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdh --journal /dev/sda13 node1
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdi --journal /dev/sda14 node1
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdj --journal /dev/sda15 node1
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdk --journal /dev/sda16 node1
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdl --journal /dev/sda17 node1
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdm --journal /dev/sda18 node1


ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdb --journal /dev/sda7 node2
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdc --journal /dev/sda8 node2
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdd --journal /dev/sda9 node2
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sde --journal /dev/sda10 node2
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdf --journal /dev/sda11 node2
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdg --journal /dev/sda12 node2
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdh --journal /dev/sda13 node2
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdi --journal /dev/sda14 node2
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdj --journal /dev/sda15 node2
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdk --journal /dev/sda16 node2
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdl --journal /dev/sda17 node2
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdm --journal /dev/sda18 node2


ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdb --journal /dev/sda7 node3
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdc --journal /dev/sda8 node3
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdd --journal /dev/sda9 node3
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sde --journal /dev/sda10 node3
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdf --journal /dev/sda11 node3
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdg --journal /dev/sda12 node3
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdh --journal /dev/sda13 node3
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdi --journal /dev/sda14 node3
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdj --journal /dev/sda15 node3
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdk --journal /dev/sda16 node3
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdl --journal /dev/sda17 node3
ceph-deploy osd create --filestore --fs-type xfs --data /dev/sdm --journal /dev/sda18 node3

# 查看盘符
[root@node1 ~]# df -hT
/dev/dm-1 xfs 7.3T 117M 7.3T 1% /var/lib/ceph/osd/ceph-0
/dev/dm-0 xfs 7.3T 117M 7.3T 1% /var/lib/ceph/osd/ceph-1
/dev/dm-2 xfs 7.3T 117M 7.3T 1% /var/lib/ceph/osd/ceph-2
/dev/dm-3 xfs 7.3T 117M 7.3T 1% /var/lib/ceph/osd/ceph-3
/dev/dm-4 xfs 7.3T 117M 7.3T 1% /var/lib/ceph/osd/ceph-4
/dev/dm-5 xfs 7.3T 117M 7.3T 1% /var/lib/ceph/osd/ceph-5
/dev/dm-6 xfs 7.3T 117M 7.3T 1% /var/lib/ceph/osd/ceph-6
/dev/dm-7 xfs 7.3T 117M 7.3T 1% /var/lib/ceph/osd/ceph-7
/dev/dm-8 xfs 7.3T 117M 7.3T 1% /var/lib/ceph/osd/ceph-8
/dev/dm-9 xfs 7.3T 117M 7.3T 1% /var/lib/ceph/osd/ceph-9
/dev/dm-11 xfs 7.3T 117M 7.3T 1% /var/lib/ceph/osd/ceph-11
/dev/dm-10 xfs 7.3T 117M 7.3T 1% /var/lib/ceph/osd/ceph-10

# 验证日志写入位置
[root@node1 ceph-0]# ls -l
总用量 60
-rw-r--r-- 1 ceph ceph 420 1月 13 10:04 activate.monmap
-rw-r--r-- 1 ceph ceph 37 1月 13 10:04 ceph_fsid
drwxr-xr-x 226 ceph ceph 8192 1月 13 16:29 current
-rw-r--r-- 1 ceph ceph 37 1月 13 10:04 fsid
lrwxrwxrwx 1 root root 9 1月 13 10:04 journal -> /dev/sda8
-rw------- 1 ceph ceph 56 1月 13 10:04 keyring
-rw-r--r-- 1 ceph ceph 21 1月 13 10:04 magic
-rw-r--r-- 1 ceph ceph 41 1月 13 10:04 osd_key
-rw-r--r-- 1 ceph ceph 6 1月 13 10:04 ready
-rw-r--r-- 1 ceph ceph 2 1月 13 10:04 require_osd_release
-rw-r--r-- 1 ceph ceph 4 1月 13 10:04 store_version
-rw-r--r-- 1 ceph ceph 53 1月 13 10:04 superblock
-rw-r--r-- 1 ceph ceph 10 1月 13 10:04 type
-rw-r--r-- 1 ceph ceph 2 1月 13 10:04 whoami

创建mgr

# 官方文档建议在每个monitor上都启动一个 mgr
# 在node1(deploy节点)上执行:
cd /etc/ceph
ceph-deploy mgr create node1 node2 node3

# 重启mgr进程(mgr实例名为各主机的主机名,这里以node1为例)
systemctl restart ceph-mgr@node1
# 查看mgr状态
systemctl status ceph-mgr@node1

启动dashboard

# 在node1上执行:
ceph mgr module enable dashboard
ceph dashboard create-self-signed-cert
ceph mgr services   # 查看dashboard访问地址;dashboard运行在当前active的mgr所在主机上,只有在该服务重启/切换时地址才会漂移到另一台主机

# 设置登录密码:
ceph dashboard set-login-credentials admin ptmjYgb2020

# 重启dashboard
ceph mgr module disable dashboard
ceph mgr module enable dashboard
netstat -lnpt | grep 8443
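# 如果想让dashboard在任意一台mgr主机上都监听固定端口,可以参考如下方式(示意,适用于Mimic及以上版本):
ceph config set mgr mgr/dashboard/server_addr 0.0.0.0
ceph config set mgr mgr/dashboard/server_port 8443
ceph mgr module disable dashboard && ceph mgr module enable dashboard
ceph mgr services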

创建pool

通常在创建pool之前,需要覆盖默认的pg_num,官方推荐:

若少于5个OSD,设置pg_num为128。

5~10个OSD,设置pg_num为512

10~50个OSD,设置pg_num为4096

超过50个OSD,可以参考pgcalc计算。

PG在Ceph中的作用

从上面可以看到,所有数据其实都被抽象成多个object,每个object都会对应到唯一的一个pg上(多副本表示有多个相同的pg,object自然也是多副本的),然后pg映射到osd上存储,所以pg可以说是ceph的核心概念了。那为什么要引入pg这个概念呢?

这是因为如果追踪的目标是object,要追踪的数量就太多了,会加大复杂性,也会带来不小的开销;于是引入pg这个概念,把object装进pg中,以pg为存储单元个体,直接追踪pg状态。一般pg数量是远远小于object数量的。

PG计算方法

Total PGs = (Total_number_of_OSD * 100) / max_replication_count,且每个池中的pg数量最好接近或等于2的幂。

例:

有36个osd、2副本、5个pool:Total PGs = 36*100/2 = 1800,每个pool的PG = 1800/5 = 360,向上取整到2的幂,那么创建pool的时候就指定pg_num为512。

ceph osd pool create pool_name 512
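按上述公式也可以用一小段shell粗略估算(仅为示意,OSD数、副本数、pool数请按实际环境替换):

osd_num=36; replica=2; pool_num=5
total_pg=$(( osd_num * 100 / replica ))
per_pool=$(( total_pg / pool_num ))
pg=1; while [ $pg -lt $per_pool ]; do pg=$(( pg * 2 )); done   # 向上取整到2的幂
echo "Total PGs=$total_pg, 每个pool约$per_pool, 建议pg_num=$pg"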

创建openstack需要的Pool

ceph osd pool create images 512
ceph osd pool create volumes 512
ceph osd pool create backups 512
ceph osd pool create vms 512

四个pool的作用

a、创建rbd块设备的pool池“vms”     作为nova虚拟机使用的存储池;
b、创建rbd块设备的pool池“images” 作为glance镜像的存储池;
c、创建rbd块设备的pool池“volumes” 作为cinder的存储池;
d、创建rbd块设备的pool池“backups” 作为cinder卷备份的存储池;

查看建立的pool

[root@yz-node1 ceph]# ceph osd lspools
1 images
2 volumes
3 backups
4 vms


OpenStack搭建

主机名 IP 角色
node1 172.16.1.14 网络节点:kolla-deploy,controller,neutron,nova,cinder
node2 172.16.1.15 计算节点:nova,cinder
node3 172.16.1.16 计算节点:nova,cinder

安装docker

# 所有主机执行(需提前将docker-engine的rpm包拷贝到各主机)
yum install -y docker-engine-1.12.6-1.el7.centos.x86_64.rpm
# 启动docker
systemctl start docker && systemctl enable docker && systemctl status docker

设置docker镜像源

# ps:所有主机
# registry.docker-cn.com为docker镜像库;registry-mirrors为阿里云镜像加速地址(如果使用内网源则不需要配置镜像加速)
mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOF
{
  "insecure-registries": ["registry.docker-cn.com"],
  "registry-mirrors": ["https://xxx.mirror.aliyuncs.com"]
}
EOF

systemctl restart docker
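# 可以用docker info确认镜像加速与insecure registry配置已生效(示意):
docker info | grep -iA2 -E 'Registry Mirrors|Insecure Registries'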

设置docker volume卷挂载方式

# ps:所有主机
mkdir /etc/systemd/system/docker.service.d
tee /etc/systemd/system/docker.service.d/kolla.conf << 'EOF'
[Service]
MountFlags=shared
EOF
# 解释一下:MountFlags=shared表示当宿主机新增分区时,docker服务无需重启即可识别。添加此参数后,后期OpenStack中使用cinder存储、新加磁盘时也比较方便

systemctl daemon-reload
systemctl restart docker

设置docker Yum源

# ps:所有主机
vi /etc/yum.repos.d/docker.repo
[docker]
baseurl = https://download.docker.com/linux/centos/7/x86_64/stable/
gpgcheck = 1
gpgkey = https://download.docker.com/linux/centos/gpg
name = Docker main Repository

Deploy主机设置虚拟化方式

mkdir -p /etc/kolla/config/nova
cat << EOF > /etc/kolla/config/nova/nova-compute.conf
[libvirt]
virt_type=kvm
cpu_mode = none
images_rbd_pool=vms
images_type=rbd
images_rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=nova
EOF

如果是在虚拟机上部署,需要将virt_type改为qemu;如果不改成qemu,在openstack之上再起虚机时,这个虚机是起不来的。
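例如可以直接用sed修改(示意):

sed -i 's/virt_type=kvm/virt_type=qemu/' /etc/kolla/config/nova/nova-compute.conf
grep virt_type /etc/kolla/config/nova/nova-compute.conf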

deploy主机安装ansible、kolla-ansible

# 安装ansible
pip install ansible

# 配置ansible
mkdir -p /etc/ansible/
tee /etc/ansible/ansible.cfg << 'EOF'
[defaults]
host_key_checking=False
pipelining=True
forks=100
EOF

升级setuptools

pip install -U setuptools==44.0.0
pip install wheel
pip install kolla-ansible==8.0.1 --ignore-installed PyYAML
pip install python-openstackclient --ignore-installed ipaddress pyOpenSSL
# 安装时会提示有几个软件包已经安装但版本较低,可以忽略

由于本次安装使用本地私有镜像仓库,为避免去公网进行校验,需更改以下文件

在进行kolla-ansible -i /etc/kolla/multinode bootstrap-servers后,此文件以及docker.repo文件会被恢复至默认,但不影响deploy和prechecks

cp /usr/share/kolla-ansible/ansible/roles/baremetal/tasks/pre-install.yml  /usr/share/kolla-ansible/ansible/roles/baremetal/tasks/pre-install.yml.bak

vi /usr/share/kolla-ansible/ansible/roles/baremetal/tasks/pre-install.yml
注释以下行:
#- name: Install docker rpm gpg key
#  rpm_key:
#    state: present
#    key: "{{ docker_yum_url }}/gpg"
#  become: True
#  when: docker_yum_gpgcheck | bool
cp /usr/share/kolla-ansible/ansible/roles/baremetal/tasks/install.yml /usr/share/kolla-ansible/ansible/roles/baremetal/tasks/install.yml.bak

vi /usr/share/kolla-ansible/ansible/roles/baremetal/tasks/install.yml
修改以下行:
- name: Install yum packages
  package:
    #name: "{{ item }}"
    #state: present
    update_cache: no
  become: True
  with_items: "{{ redhat_pkg_install }}"
  when: ansible_os_family == 'RedHat'
  register: yum_install_result

deploy主机更改配置文件模板

cp -r /usr/share/kolla-ansible/etc_examples/kolla /etc/
cp /usr/share/kolla-ansible/ansible/inventory/* /etc/kolla/

deploy主机生成密码

cd /etc/kolla/
kolla-genpwd

# 修改密码,此密码为web登录密码
[root@node2 kolla]# cat passwords.yml | grep keystone_admin
keystone_admin_password: tmbHrml@2020
# 此时如果想设定指定的密码可以直接在这块改tmbHrml@2020
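# 也可以直接用sed修改(示意,密码仅为示例):
sed -i 's/^keystone_admin_password:.*/keystone_admin_password: tmbHrml@2020/' /etc/kolla/passwords.yml
grep keystone_admin_password /etc/kolla/passwords.yml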

deploy主机定义角色

[root@yz-node1 kolla]# more multinode 
# These initial groups are the only groups required to be modified. The
# additional groups are for more control of the environment.
[control]
# These hostname must be resolvable from your deployment host
node1
# The above can also be specified as follows:
#control[01:03] ansible_user=kolla

# The network nodes are where your l3-agent and loadbalancers will run
# This can be the same as a host in the control group
[network]
node1

[compute]
node1
node2
node3

[monitoring]
node1

# When compute nodes and control nodes use different interfaces,
# you need to comment out "api_interface" and other interfaces from the globals.yml
# and specify like below:
#compute01 neutron_external_interface=eth0 api_interface=em1 storage_interface=em1 tunnel_interface=em1

[storage]
node1
node2
node3

[deployment]
node1

deploy主机修改globals配置文件

[root@yzpool1 kolla]# cat globals.yml |grep -v "#" |grep -Ev "^$|^[#;]"
---
kolla_base_distro: "centos"
kolla_install_type: "source"
openstack_release: "stein"
node_custom_config: "/etc/kolla/config"
kolla_internal_vip_address: "172.16.1.20" # 平台访问的浮动IP
kolla_external_vip_address: "172.16.1.20" # 此VIP必须与集群中的机器互通并且在同网段内
docker_registry: "registry.docker-cn.com"
network_interface: "eno2" # openstack内部管理网络、本机物理IP所在网卡,管理口
kolla_external_vip_interface: "{{ network_interface }}"
api_interface: "{{ network_interface }}"
storage_interface: "{{ network_interface }}"
cluster_interface: "{{ network_interface }}"
tunnel_interface: "{{ network_interface }}"
dns_interface: "{{ network_interface }}"
neutron_external_interface: "eno1" # 访问openstack的物理网卡不需要配置IP,业务口
neutron_plugin_agent: "openvswitch"
enable_ceph: "no"
enable_ceph_rgw: "yes"
enable_cinder: "yes"
enable_manila_backend_cephfs_native: "yes"
enable_neutron_qos: "yes"
enable_ceph_rgw_keystone: "yes"
glance_backend_ceph: "yes"
glance_enable_rolling_upgrade: "no"
gnocchi_backend_storage: "ceph"
cinder_backend_ceph: "yes"
nova_backend_ceph: "yes"
ironic_dnsmasq_dhcp_range:
tempest_image_id:
tempest_flavor_ref_id:
tempest_public_network_id:
tempest_floating_network_name:

在ceph集群中创建nova、glance、cinder、cinder-backup等组件使用ceph的用户,并生成keyring文件

cd /etc/ceph
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' -o ceph.client.cinder.keyring
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' -o ceph.client.glance.keyring
ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups, allow rwx pool=volumes' -o ceph.client.cinder-backup.keyring
ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=vms, allow rwx pool=volumes, allow rx pool=images' -o ceph.client.nova.keyring

# 如果出现 Error EINVAL: key for client.cinder exists but cap mon does not match,则先执行更新cap:
ceph auth caps client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
# 参考:https://www.cnblogs.com/bfmq/p/6073334.html

拷贝配置文件及keyring文件

cd /etc/kolla/config
mkdir cinder
cd /etc/kolla/config/cinder
mkdir cinder-volume
mkdir cinder-backup
cd /etc/kolla/config
mkdir glance

scp /etc/ceph/{ceph.conf,ceph.client.glance.keyring} node1:/etc/kolla/config/glance/
scp /etc/ceph/{ceph.conf,ceph.client.cinder.keyring} node1:/etc/kolla/config/cinder/cinder-volume/
scp /etc/ceph/{ceph.conf,ceph.client.cinder.keyring} node1:/etc/kolla/config/cinder/cinder-backup/
scp /etc/ceph/ceph.client.cinder-backup.keyring node1:/etc/kolla/config/cinder/cinder-backup/
scp /etc/ceph/{ceph.conf,ceph.client.nova.keyring,ceph.client.cinder.keyring} node1:/etc/kolla/config/nova/
scp /etc/ceph/ceph.conf node1:/etc/kolla/config/cinder/

使用kolla的合并配置特性

配置外部ceph,使用kolla的合并配置特性,对相应的组件的ceph存储pool池进行配置。

# glance的配置镜像存储
vi /etc/kolla/config/glance/glance-api.conf
# 添加:
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf

# cinder的配置卷存储
vi /etc/kolla/config/cinder/cinder-volume.conf
# 添加:
[DEFAULT]
enabled_backends=rbd-1
[rbd-1]
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=cinder
backend_host=rbd:volumes
rbd_pool=volumes
volume_backend_name=rbd-1
volume_driver=cinder.volume.drivers.rbd.RBDDriver
rbd_secret_uuid=ea241bba-2031-4b0e-b750-c8356cbb46a8
# 注意:rbd_secret_uuid在/etc/kolla/passwords.yml 文件中找
# cat /etc/kolla/passwords.yml | grep rbd_secret_uuid

# cinder-backup的配置
vi /etc/kolla/config/cinder/cinder-backup.conf
# 添加:
[DEFAULT]
backup_ceph_conf=/etc/ceph/ceph.conf
backup_ceph_user=cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool=backups
backup_driver = cinder.backup.drivers.ceph
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true

# nova的配置
vi /etc/kolla/config/nova/nova-compute.conf
# 添加:
[libvirt]
virt_type=kvm
cpu_mode = none
images_rbd_pool=vms
images_type=rbd
images_rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=nova

bootstrap-servers初始化基础安装环境

# 所有节点执行
pip install -U docker --ignore-installed requests
# 在deploy节点执行
# 修改/etc/kolla/globals.yml,先注释掉docker_registry:
# docker_registry: "registry.docker-cn.com"

# 执行:
kolla-ansible -i /etc/kolla/multinode bootstrap-servers

deploy主机执行precheck(检查环境是否可用)

kolla-ansible -i /etc/kolla/multinode prechecks

deploy主机执行下载镜像

# 修改/etc/kolla/globals.yml,取消注释docker_registry:
docker_registry: "registry.docker-cn.com"
# 执行:
kolla-ansible -i /etc/kolla/multinode pull

deploy主机执行部署

kolla-ansible -i /etc/kolla/multinode deploy
# 生成环境变量
kolla-ansible post-deploy
source /etc/kolla/admin-openrc.sh
# 部署完成!!!
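# 部署完成后可以做一个简单验证(示意,需先source /etc/kolla/admin-openrc.sh):
openstack service list
openstack compute service list
openstack network agent list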

如果在部署过程中出现问题,需要重新部署

# 删除/etc/kolla/所有容器映射目录
# 清除所有节点容器
docker rm -f $(docker ps -aq)
# 清除volume卷
docker volume rm $(docker volume ls -q)
# 停止docker服务
systemctl stop docker
# 清除tmp下文件
rm -rf /var/lib/docker/tmp/*
# 启动docker服务
systemctl restart docker
# 再次部署一切正常
kolla-ansible -i /etc/kolla/multinode deploy

配置网络环境

修改网络配置支持vlan

参考文档:https://www.dazhuanlan.com/2020/01/30/5e31bb414e7eb/

由于环境要求,平台后续业务需要采用多租户、多用户,通过划分不同vlan实现租户网络隔离;而此处虚拟交换机采用的是openvswitch,默认没有启用vlan模式的外部网络,因此需要手动更改

网络节点操作

sed -i 's/tenant_network_types = .*/tenant_network_types= vlan,vxlan/g' /etc/kolla/neutron-openvswitch-agent/ml2_conf.ini
sed -i 's/network_vlan_ranges =.*/network_vlan_ranges = physnet1:100:600/g' /etc/kolla/neutron-openvswitch-agent/ml2_conf.ini
sed -i 's/tenant_network_types = .*/tenant_network_types= vlan,vxlan/g' /etc/kolla/neutron-server/ml2_conf.ini
sed -i 's/network_vlan_ranges =.*/network_vlan_ranges = physnet1:100:600/g' /etc/kolla/neutron-server/ml2_conf.ini

# network_vlan_ranges:vlan范围
# 重启neutron容器
docker restart neutron_server neutron_openvswitch_agent
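# 可用如下方式确认vlan相关配置已写入(示意):
grep -E 'tenant_network_types|network_vlan_ranges' /etc/kolla/neutron-server/ml2_conf.ini /etc/kolla/neutron-openvswitch-agent/ml2_conf.ini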

计算节点操作

# 创建一个br-ex外部网桥,并关联到主机的eno1物理网卡上(也就是关联到openstack业务口)
docker exec -u root -it neutron_openvswitch_agent ovs-vsctl add-br br-ex
docker exec -u root -it neutron_openvswitch_agent ovs-vsctl add-port br-ex eno1

# 设置vlan范围
sed -i 's/tenant_network_types = .*/tenant_network_types= vlan,vxlan/g' /etc/kolla/neutron-openvswitch-agent/ml2_conf.ini
sed -i 's/network_vlan_ranges =.*/network_vlan_ranges = physnet1:100:600/g' /etc/kolla/neutron-openvswitch-agent/ml2_conf.ini

# 重启neutron容器
docker restart neutron_server neutron_openvswitch_agent
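# 可以确认br-ex网桥及eno1端口已创建(示意):
docker exec -u root -it neutron_openvswitch_agent ovs-vsctl show
docker exec -u root -it neutron_openvswitch_agent ovs-vsctl list-ports br-ex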

创建后端存储并挂载到本地

# 在控制节点创建即可
ceph df
rbd create vms/docker --size 10240
mkdir /data
rbd feature disable vms/docker object-map fast-diff deep-flatten
rbd map vms/docker
mkfs.ext4 -T largefile /dev/rbd0
mount /dev/rbd0 /data
rbd ls vms   # 查看存储池中的数据
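# 验证映射与挂载结果(示意):
rbd showmapped
df -hT /data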