[root@TVM1 ~]# source /home/stack/myansible/bin/activate
(myansible) [root@TVM1 ~]# cd /home/stack/myansible/lib/python3.6/site-packages/tvault_configurator
(myansible) [root@TVM1 tvault_configurator]# python recreate_conf.py
(myansible) [root@TVM1 tvault_configurator]# systemctl restart tvault-config

Username: admin
Password: password

header:
header_color: blue
body_text_color: "#DC143C"
body_text:
header_font_size: 25px
body_text_font_size: 22px

source /home/stack/myansible/bin/activate

[DEFAULT]
vault_storage_type = nfs
vault_storage_nfs_export = 192.168.1.34:/mnt/tvault/tvm5
vault_storage_nfs_options = nolock,soft,timeo=180,intr,lookupcache=none
vault_data_directory_old = /var/triliovault
vault_data_directory = /var/trilio/triliovault-mounts
log_file = /var/log/kolla/triliovault-datamover/tvault-contego.log
debug = False
verbose = True
max_uploads_pending = 3
max_commit_pending = 3
dmapi_transport_url = rabbit://openstack:[email protected]:5672,openstack:[email protected]:5672,openstack:[email protected]:5672//
[dmapi_database]
connection = mysql+pymysql://dmapi:x5nvYXnAn4rXmCHfWTK8h3wwShA4vxMq3gE2jH57@kolla-victoriaR-internal.triliodata.demo:3306/dmapi
[libvirt]
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = volumes
[ceph]
keyring_ext = .volumes.keyring
ceph_dir = /etc/ceph/directory1/,/etc/ceph/directory2/
[contego_sys_admin]
helper_command = sudo /usr/bin/privsep-helper
[conductor]
use_local = True
[oslo_messaging_rabbit]
ssl = false
[cinder]
http_retries = 10



[DEFAULT]
vault_storage_type = nfs
vault_storage_nfs_export = 192.168.1.34:/mnt/tvault/tvm5
vault_storage_nfs_options = nolock,soft,timeo=180,intr,lookupcache=none
vault_data_directory_old = /var/triliovault
vault_data_directory = /var/trilio/triliovault-mounts
log_file = /var/log/kolla/triliovault-datamover/tvault-contego.log
debug = False
verbose = True
max_uploads_pending = 3
max_commit_pending = 3
dmapi_transport_url = rabbit://openstack:[email protected]:5672,openstack:[email protected]:5672,openstack:[email protected]:5672//
[dmapi_database]
connection = mysql+pymysql://dmapi:x5nvYXnAn4rXmCHfWTK8h3wwShA4vxMq3gE2jH57@kolla-victoriaR-internal.triliodata.demo:3306/dmapi
[libvirt]
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = volumes
[ceph]
keyring_ext = .keyring
ceph_dir = /etc/ceph/
[contego_sys_admin]
helper_command = sudo /usr/bin/privsep-helper
[conductor]
use_local = True
[oslo_messaging_rabbit]
ssl = false
[cinder]
http_retries = 10

curl -i -X PUT \
-H "X-Auth-Token:gAAAAABh0ttjiKRPpVNPBjRjZywzsgVton2HbMHUFrbTXDhVL1w2zCHF61erouo4ZUjGyHVoIQMG-NyGLdR7nexmgOmG7ed66LJ3IMVul1LC6CPzqmIaEIM48H0kc-BGvhV0pvX8VMZiozgFdiFnqYHPDvnLRdh7cK6_X5dw4FHx_XPmkhx7PsQ" \
-H "Content-Type:application/json" \
-d \
'{
"metadata": {
"workload_id": "c13243a3-74c8-4f23-b3ac-771460d76130",
"workload_name": "workload-c13243a3-74c8-4f23-b3ac-771460d76130"
}
}' \
'https://kolla-victoria-ubuntu20-1.triliodata.demo:9311/v1/secrets/f3b2fce0-3c7b-4728-b178-7eb8b8ebc966/metadata'
curl -i -X GET \
-H "X-Auth-Token:gAAAAABh0ttjiKRPpVNPBjRjZywzsgVton2HbMHUFrbTXDhVL1w2zCHF61erouo4ZUjGyHVoIQMG-NyGLdR7nexmgOmG7ed66LJ3IMVul1LC6CPzqmIaEIM48H0kc-BGvhV0pvX8VMZiozgFdiFnqYHPDvnLRdh7cK6_X5dw4FHx_XPmkhx7PsQ" \
'https://kolla-victoria-ubuntu20-1.triliodata.demo:9311/v1/secrets/f3b2fce0-3c7b-4728-b178-7eb8b8ebc966/metadata'

workloadmgr license-create <license_file>

qemu-img info 85b645c5-c1ea-4628-b5d8-1faea0e9d549
image: 85b645c5-c1ea-4628-b5d8-1faea0e9d549
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 21M
cluster_size: 65536
backing file: /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW0=/workload_3c2fbee5-ad90-4448-b009-5047bcffc2ea/snapshot_f4874ed7-fe85-4d7d-b22b-082a2e068010/vm_id_9894f013-77dd-4514-8e65-818f4ae91d1f/vm_res_id_9ae3a6e7-dffe-4424-badc-bc4de1a18b40_vda/a6289269-3e72-4085-adca-e228ba656984
Format specific information:
compat: 1.1
lazy refcounts: false
refcount bits: 16
corrupt: false

# echo -n 10.10.2.20:/upstream | base64
MTAuMTAuMi4yMDovdXBzdHJlYW0=

#mount --bind <mount-path1> <mount-path2>

#vi /etc/fstab
<mount-path1> <mount-path2> none bind 0 0

workloadmgr trust-list

workloadmgr trust-show <trust_id>

workloadmgr trust-create [--is_cloud_trust {True,False}] <role_name>

workloadmgr trust-delete <trust_id>

Please ask your Trilio Customer Success Manager or Engineer.
This page will be updated once the script is publicly available.

./backing_file_update.sh /var/triliovault-mounts/<base64>/workload_<workload_id>
/tmp/backing_file_update.log
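To connect the snippets above, here is a minimal bash sketch that derives the base64-encoded mount-point directory name for an NFS export, creates the bind mount, and persists it in /etc/fstab. The export value and both mount paths are placeholders taken from the examples above, not a prescribed layout.

#!/bin/bash
# Sketch only: derive the base64-named Trilio mount point for an NFS export
# and bind-mount it as shown above. All values are placeholders.
NFS_EXPORT="10.10.2.20:/upstream"
BASE64_DIR=$(echo -n "${NFS_EXPORT}" | base64)              # -> MTAuMTAuMi4yMDovdXBzdHJlYW0=
MOUNT_PATH1="/var/triliovault-mounts/${BASE64_DIR}"         # <mount-path1> (assumed location)
MOUNT_PATH2="/var/trilio/triliovault-mounts/${BASE64_DIR}"  # <mount-path2> (assumed location)
mkdir -p "${MOUNT_PATH1}" "${MOUNT_PATH2}"
mount --bind "${MOUNT_PATH1}" "${MOUNT_PATH2}"
# Persist the bind mount across reboots:
echo "${MOUNT_PATH1} ${MOUNT_PATH2} none bind 0 0" >> /etc/fstab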



pcs resource create dashboard_ip ocf:heartbeat:IPaddr2 ip=<new_vip> cidr_netmask=<netmask> nic=<new_nw_interface> op monitor interval=30s
pcs constraint colocation add dashboard_ip virtual_ip

server {
listen <dashboard_ip>:8000 ssl ;
ssl_certificate "/opt/stack/data/cert/workloadmgr.cert";
ssl_certificate_key "/opt/stack/data/cert/workloadmgr.key";
keepalive_timeout 65;
proxy_read_timeout 1800;
access_log on;
location / {
proxy_set_header Host $host:$server_port;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass https://<virtual_ip>:443;
}
}
server {
listen <dashboard_ip>:3001 ssl ;
ssl_certificate "/opt/stack/data/cert/workloadmgr.cert";
ssl_certificate_key "/opt/stack/data/cert/workloadmgr.key";
keepalive_timeout 65;
proxy_read_timeout 1800;
access_log on;
location / {
proxy_set_header Host $host:$server_port;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass https://<virtual_ip>:3001;
}
}
iptables -A INPUT -p tcp -s tvm1,tvm2,tvm3 --dport 80 -j ACCEPT
iptables -A INPUT -p tcp -s tvm1,tvm2,tvm3 --dport 443 -j ACCEPT
iptables -A INPUT -p tcp --dport 80 -j DROP
iptables -A INPUT -p tcp --dport 443 -j DROP

https://<dashboard_ip>:8000

Learn about spinning up the Trilio VM

workloadmgr disable-scheduler --workloadids <workloadid>

workloadmgr enable-scheduler --workloadids <workloadid>

workloadmgr scheduler-trust-validate <workload_id>

# echo -n 10.10.2.20:/Trilio_Backup | base64
MTAuMTAuMi4yMDovVHJpbGlvX0JhY2t1cA==

# echo -n /Trilio_Backup | base64
L1RyaWxpb19CYWNrdXA=

cd /opt/openstack-ansible/playbooks
openstack-ansible os-tvault-install.yml --tags "tvault-all-uninstall"

triliovault-cfg-scripts/common/triliovault_nfs_map_input.yml





--snapshotids <snapshotid> ➡️ Search only in the specified snapshot IDs (snapshot-id: include the instance with this UUID)
cd /opt/openstack-ansible/playbooks
openstack-ansible lxc-containers-destroy.yml --limit "DMAPI CONTAINER_NAME"

#tvault-dmapi
tvault-dmapi_hosts:
  infra-1:
    ip: 172.26.0.3
  infra-2:
    ip: 172.26.0.4

#tvault-datamover
tvault_compute_hosts:
  infra-1:
    ip: 172.26.0.7
  infra-2:
    ip: 172.26.0.8

# Datamover haproxy setting
haproxy_extra_services:
  - service:
      haproxy_service_name: datamover_service
      haproxy_backend_nodes: "{{ groups['dmapi_all'] | default([]) }}"
      haproxy_ssl: "{{ haproxy_ssl }}"
      haproxy_port: 8784
      haproxy_balance_type: http
      haproxy_backend_options:
        - "httpchk GET / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"

rm /opt/openstack-ansible/inventory/env.d/tvault-dmapi.yml

source cloudadmin.rc
openstack endpoint delete "internal datamover service endpoint_id"
openstack endpoint delete "public datamover service endpoint_id"
openstack endpoint delete "admin datamover service endpoint_id"lxc-attach -n "GALERA CONTAINER NAME"
mysql -u root -p "root password"
DROP DATABASE dmapi;
DROP USER dmapi;

lxc-attach -n "RABBITMQ CONTAINER NAME"
rabbitmqctl delete_user dmapi
rabbitmqctl delete_vhost /dmapi

rm /etc/haproxy/conf.d/datamover_service

frontend datamover_service-front-1
bind ussuriubuntu.triliodata.demo:8784 ssl crt /etc/ssl/private/haproxy.pem ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
option httplog
option forwardfor except 127.0.0.0/8
reqadd X-Forwarded-Proto:\ https
mode http
default_backend datamover_service-back
frontend datamover_service-front-2
bind 172.26.1.2:8784
option httplog
option forwardfor except 127.0.0.0/8
mode http
default_backend datamover_service-back
backend datamover_service-back
mode http
balance leastconn
stick store-request src
stick-table type ip size 256k expire 30m
option forwardfor
option httplog
option httpchk GET / HTTP/1.0\r\nUser-agent:\ osa-haproxy-healthcheck
server controller_dmapi_container-bf17d5b3 172.26.1.75:8784 check port 8784 inter 12000 rise 1 fall 1

systemctl restart haproxy

rm -rf /opt/config-certs/rabbitmq
rm -rf /opt/config-certs/s3

virsh list

virsh destroy <Trilio VM Name or ID>

virsh undefine <Trilio VM name>

192.168.1.33:/var/share1
192.168.1.34:/var/share1
192.168.1.35:/var/share1

prod-compute-1.trilio.demo
prod-compute-2.trilio.demo
prod-compute-3.trilio.demo
.
.
.
prod-compute-30.trilio.demo

compute_bare.trilio.demo
compute_virtual

multi_ip_nfs_shares:
  - "192.168.1.34:/var/share1": ['prod-compute-[1:10].trilio.demo', 'compute_bare.trilio.demo']
    "192.168.1.35:/var/share1": ['prod-compute-[11:20].trilio.demo', 'compute_virtual']
    "192.168.1.33:/var/share1": ['prod-compute-[21:30].trilio.demo']
single_ip_nfs_shares: []

multi_ip_nfs_shares:
  - "192.168.1.34:/var/share1": ['172.30.3.[11:20]', '172.30.4.40']
    "192.168.1.35:/var/share1": ['172.30.3.[21:30]', '172.30.4.50']
    "192.168.1.33:/var/share1": ['172.30.3.[31:40]']
single_ip_nfs_shares: []

(undercloud) [stack@ucqa161 ~]$ openstack server list
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
| ID | Name | Status | Networks | Image | Flavor |
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
| 8c3d04ae-fcdd-431c-afa6-9a50f3cb2c0d | overcloudtrain1-controller-2 | ACTIVE | ctlplane=172.30.5.18 | overcloud-full | control |
| 103dfd3e-d073-4123-9223-b8cf8c7398fe | overcloudtrain1-controller-0 | ACTIVE | ctlplane=172.30.5.11 | overcloud-full | control |
| a3541849-2e9b-4aa0-9fa9-91e7d24f0149 | overcloudtrain1-controller-1 | ACTIVE | ctlplane=172.30.5.25 | overcloud-full | control |
| 74a9f530-0c7b-49c4-9a1f-87e7eeda91c0 | overcloudtrain1-novacompute-0 | ACTIVE | ctlplane=172.30.5.30 | overcloud-full | compute |
| c1664ac3-7d9c-4a36-b375-0e4ee19e93e4 | overcloudtrain1-novacompute-1 | ACTIVE | ctlplane=172.30.5.15 | overcloud-full | compute |
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+

workloadmgr filepath-search [--snapshotids <snapshotid>]
[--end_filter <end_filter>]
[--start_filter <start_filter>]
[--date_from <date_from>]
[--date_to <date_to>]
<vm_id> <file_path>

workloadmgr snapshot-list --all=True
workloadmgr restore-list

pcs status
Cluster name: triliovault
WARNINGS:
Corosync and pacemaker node names do not match (IPs used in setup?)
Stack: corosync
Current DC: tvm3 (version 1.1.23-1.el7_9.1-9acf116022) - partition with quorum
Last updated: Thu Aug 26 12:10:32 2021
Last change: Thu Aug 26 08:02:51 2021 by root via crm_resource on tvm1
3 nodes configured
8 resource instances configured
Online: [ tvm1 tvm2 tvm3 ]
Full list of resources:
virtual_ip (ocf::heartbeat:IPaddr2): Started tvm1
virtual_ip_public (ocf::heartbeat:IPaddr2): Started tvm1
virtual_ip_admin (ocf::heartbeat:IPaddr2): Started tvm1
virtual_ip_internal (ocf::heartbeat:IPaddr2): Started tvm1
wlm-cron (systemd:wlm-cron): Started tvm1
Clone Set: lb_nginx-clone [lb_nginx]
Started: [ tvm1 ]
Stopped: [ tvm2 tvm3 ]
Daemon Status:
corosync: active/enabled
pacemaker: active/enabled
pcsd: active/enabled

systemctl stop wlm-api
systemctl stop wlm-scheduler
systemctl stop wlm-workloads
systemctl stop mysqld
rabbitmqctl stop

reboot

shutdown

systemctl stop wlm-api
systemctl stop wlm-scheduler
systemctl stop wlm-workloads
systemctl stop mysqld
rabbitmqctl stop

shutdown

systemctl stop wlm-api
systemctl stop wlm-scheduler
systemctl stop wlm-workloads
systemctl stop mysqld
rabbitmqctl stop
pcs resource stop wlm-cron
pcs resource stop lb_nginx-clone

shutdown

galera_new_cluster

[root@TVM1 ssl]# cd /etc/tvault/ssl/
[root@TVM1 ssl]# ls -lisa server*
577678 0 lrwxrwxrwx 1 root root 8 Jan 21 14:36 server.crt -> TVM1.crt
577672 0 lrwxrwxrwx 1 root root 8 Jan 21 14:36 server.key -> TVM1.key
1178820 0 lrwxrwxrwx 1 root root 8 Jan 21 14:36 server.pem -> TVM1.pem

def main():
    # configure the networking
    #create_ssl_certificates()
    http_thread = Thread(target=main_http)
    http_thread.daemon = True  # thread dies with the program
    http_thread.start()
    bottle.debug(True)
    srv = SSLWSGIRefServer(host='::', port=443)
    bottle.run(server=srv, app=app, quiet=False, reloader=False)

[root@TVM1 ~]# systemctl restart tvault-config
[root@TVM1 ~]# pcs resource restart lb_nginx-clone
lb_nginx-clone successfully restarted

workloadmgr workload-get-importworkloads-list [--project_id <project_id>]

workloadmgr workload-importworkloads [--workloadids <workloadid>]

workloadmgr workload-get-importworkloads-progres --jobid <jobId>

workloadmgr workload-get-orphaned-workloads-list [--migrate_cloud {True,False}]
[--generate_yaml {True,False}]

workloadmgr workload-reassign-workloads
[--old_tenant_ids <old_tenant_id>]
[--new_tenant_id <new_tenant_id>]
[--workload_ids <workload_id>]
[--user_id <user_id>]
[--migrate_cloud {True,False}]
[--map_file <map_file>]

reassign_mappings:
  - old_tenant_ids: []      # user can provide a list of old_tenant_ids or workload_ids
    new_tenant_id: new_tenant_id
    user_id: user_id
    workload_ids: []        # user can provide a list of old_tenant_ids or workload_ids
    migrate_cloud: True/False   # Set to True if you want to reassign workloads from
                                # other clouds as well. Default is False
  - old_tenant_ids: []      # user can provide a list of old_tenant_ids or workload_ids
    new_tenant_id: new_tenant_id
    user_id: user_id
    workload_ids: []        # user can provide a list of old_tenant_ids or workload_ids
    migrate_cloud: True/False   # Set to True if you want to reassign workloads from
                                # other clouds as well. Default is False

# For RHEL and CentOS
yum install genisoimage
#For Ubuntu
apt-get install genisoimage

[root@kvm]# cat meta-data
instance-id: triliovault
network-interfaces: |
  auto eth0
  iface eth0 inet static
  address 158.69.170.20
  netmask 255.255.255.0
  gateway 158.69.170.30
  dns-nameservers 11.11.0.51
local-hostname: localhost

[root@kvm]# cat user-data
#cloud-config
chpasswd:
  list: |
    root:password1
    stack:password2
  expire: False

genisoimage -output tvault-firstboot-config.iso -volid cidata -joliet -rock user-data meta-data

tar Jxvf TrilioVault_file.tar.xz

virt-install -n triliovault-vm --memory 24576 --vcpus 8 \
--os-type linux \
--disk tvault-appliance-os-3.0.154.qcow2,device=disk,bus=virtio,size=40 \
--network bridge=virbr0,model=virtio \
--network bridge=virbr1,model=virtio \
--graphics none \
--import \
--disk path=tvault-firstboot-config.iso,device=cdrom

sudo yum remove cloud-init

[root@TVM1 ~]# id nova
uid=42436(nova) gid=42436(nova) groups=42436(nova),990(libvirt),36(kvm)

## Download the shell script
$ curl -O https://raw.githubusercontent.com/trilioData/triliovault-cfg-scripts/master/common/nova_userid.sh
## Assign executable permissions
$ chmod +x nova_userid.sh
## Execute the shell script to change 'nova' user and group id to '42436'
$ ./nova_userid.sh
## Ignore any errors and verify that 'nova' user and group id has changed to '42436'
$ id nova
uid=42436(nova) gid=42436(nova) groups=42436(nova),990(libvirt),36(kvm)

Learn about encrypting Trilio workloads with Barbican

./backing_file_update.sh /var/triliovault-mounts/<base64>/workload_<workload_id>

# echo -n 10.10.2.20:/Trilio_Backup | base64
MTAuMTAuMi4yMDovVHJpbGlvX0JhY2t1cA==

# echo -n /Trilio_Backup | base64
L1RyaWxpb19CYWNrdXA=

mkdir /var/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=/
mount --bind /var/triliovault-mounts/L21udC90dmF1bHQvdHZtNA==/ /var/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=
chmod 777 /var/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=/

mkdir /var/lib/nova/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=/
mount --bind /var/lib/nova/triliovault-mounts/L21udC90dmF1bHQvdHZtNA==/ /var/lib/nova/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=
chmod 777 /var/lib/nova/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=/

mkdir /var/trilio/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=/
mount --bind /var/trilio/triliovault-mounts/L21udC90dmF1bHQvdHZtNA==/ /var/trilio/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=
chmod 777 /var/trilio/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=/

mkdir /var/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=/
mount --bind /var/triliovault-mounts/L21udC90dmF1bHQvdHZtNA==/ /var/trilio/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=
chmod 777 /var/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=/

juju exec [-m <model>] --application trilio-data-mover "sudo -u nova mkdir /var/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=/"
juju exec [-m <model>] --application trilio-wlm "sudo -u nova mkdir /var/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=/"

juju exec [-m <model>] --application trilio-data-mover "sudo mount --bind /var/triliovault-mounts/L21udC90dmF1bHQvdHZtNA==/ /var/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=/"
juju exec [-m <model>] --application trilio-wlm "sudo mount --bind /var/triliovault-mounts/L21udC90dmF1bHQvdHZtNA==/ /var/triliovault-mounts/MTkyLjE2OC4xLjM1Oi9tbnQvdHZhdWx0L3R2bTQ=/"

| Error message | None |
+----------------+-------------------------------------------------------------------------------------------+

| Status        | ACTIVE |
| Error code    | None   |
| Error message | None   |
+----------------+--------------------------------------------------------------------------------------------+

| Secret type | symmetric |
| Mode        | ctr       |
| Expiration  | None      |
+---------------+--------------------------------------------------------------------------------------------+

()[root@overcloudtrain1-controller-0 /]#

# For RHOSP13
systemctl disable tripleo_trilio_dmapi.service
systemctl stop tripleo_trilio_dmapi.service
docker stop trilio_dmapi
# For RHOSP16 onwards
systemctl disable tripleo_trilio_dmapi.service
systemctl stop tripleo_trilio_dmapi.service
podman stop trilio_dmapi

# For RHOSP13
docker rm trilio_dmapi
docker rm trilio_datamover_api_init_log
docker rm trilio_datamover_api_db_sync
# For RHOSP16 onwards
podman rm trilio_dmapi
podman rm trilio_datamover_api_init_log
podman rm trilio_datamover_api_db_sync
## If present, remove below container as well
podman rm container-puppet-triliodmapi

rm -rf /var/lib/config-data/puppet-generated/triliodmapi
rm /var/lib/config-data/puppet-generated/triliodmapi.md5sum
rm -rf /var/lib/config-data/triliodmapi*

rm -rf /var/log/containers/trilio-datamover-api/

# For RHOSP13
docker stop trilio_datamover
# For RHOSP16 onwards
systemctl disable tripleo_trilio_datamover.service
systemctl stop tripleo_trilio_datamover.service
podman stop trilio_datamover

# For RHOSP13
docker rm trilio_datamover
# For RHOSP16 onwards
podman rm trilio_datamover
## If present, remove below container as well
podman rm container-puppet-triliodmapi

## The following steps apply to all supported RHOSP releases.
# Check triliovault backup target mount point
mount | grep trilio
# Unmount it
-- If it's NFS (COPY UUID_DIR from your compute host using above command)
umount /var/lib/nova/triliovault-mounts/<UUID_DIR>
-- If it's S3
umount /var/lib/nova/triliovault-mounts
# Verify that it's unmounted
mount | grep trilio
df -h | grep trilio
# Remove mount point directory after verifying that backup target unmounted successfully.
# Otherwise actual data from backup target may get cleaned.
rm -rf /var/lib/nova/triliovault-mounts

rm -rf /var/lib/config-data/puppet-generated/triliodm/
rm /var/lib/config-data/puppet-generated/triliodm.md5sum
rm -rf /var/lib/config-data/triliodm*

rm -rf /var/log/containers/trilio-datamover/

listen trilio_datamover_api
bind 172.25.3.60:13784 transparent ssl crt /etc/pki/tls/private/overcloud_endpoint.pem
bind 172.25.3.60:8784 transparent
http-request set-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
http-request set-header X-Forwarded-Port %[dst_port]
option httpchk
option httplog
server overcloud-controller-0.internalapi.localdomain 172.25.3.59:8784 check fall 5 inter 2000 rise 2

# For RHOSP13
docker restart haproxy-bundle-docker-0
# For RHOSP16 onwards
podman restart haproxy-bundle-podman-0

openstack service delete dmapi
openstack user delete dmapi

## On RHOSP13, run the following command on the node where the database service runs
docker exec -ti -u root galera-bundle-docker-0 mysql -u root
## On RHOSP16
podman exec -it galera-bundle-podman-0 mysql -u root

## Clean database
DROP DATABASE dmapi;
## Clean dmapi user
=> List 'dmapi' user accounts
MariaDB [mysql]> select user, host from mysql.user where user='dmapi';
+-------+-------------+
| user | host |
+-------+-------------+
| dmapi | 172.25.2.10 |
| dmapi | 172.25.2.8 |
+-------+-------------+
2 rows in set (0.00 sec)
=> Delete those user accounts
MariaDB [mysql]> DROP USER 'dmapi'@'172.25.2.10';
Query OK, 0 rows affected (0.82 sec)
MariaDB [mysql]> DROP USER 'dmapi'@'172.25.2.8';
Query OK, 0 rows affected (0.05 sec)
=> Verify that dmapi user got cleaned
MariaDB [mysql]> select user, host from mysql.user where user='dmapi';
Empty set (0.00 sec)

virsh list

virsh destroy <Trilio VM Name or ID>

virsh undefine <Trilio VM name>

pcs resource enable wlm-cron
[root@TVM2 ~]# pcs resource disable wlm-cron
[root@TVM2 ~]# systemctl status wlm-cron
● wlm-cron.service - workload's scheduler cron service
Loaded: loaded (/etc/systemd/system/wlm-cron.service; disabled; vendor preset: disabled)
Active: inactive (dead)
Jun 11 08:27:06 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:06 - INFO - 1...t
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 140686268624368 Child 11389 ki...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - 1...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Shutting down thread pool
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...l
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Stopping the threads
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...s
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: All threads are stopped succes...y
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - A...y
Jun 11 08:27:09 TVM2 systemd[1]: Stopped workload's scheduler cron service.
Hint: Some lines were ellipsized, use -l to show in full.
[root@TVM2 ~]# pcs resource show wlm-cron
Resource: wlm-cron (class=systemd type=wlm-cron)
Meta Attrs: target-role=Stopped
Operations: monitor interval=30s on-fail=restart timeout=300s (wlm-cron-monitor-interval-30s)
start interval=0s on-fail=restart timeout=300s (wlm-cron-start-interval-0s)
stop interval=0s timeout=300s (wlm-cron-stop-interval-0s)
[root@TVM2 ~]# ps -ef | grep -i workloadmgr-cron
root 15379 14383 0 08:27 pts/0 00:00:00 grep --color=auto -i workloadmgr-cron
cd /opt/
wget https://raw.githubusercontent.com/trilioData/triliovault-cfg-scripts/4.3.2/TVOAppliance/hf_upgrade.sh
chmod +x hf_upgrade.sh

./hf_upgrade.sh --all
OR
./hf_upgrade.sh -a
pcs resource enable wlm-cron
cd /<tvo_packages_download_path>/
wget https://raw.githubusercontent.com/trilioData/triliovault-cfg-scripts/4.3.2/TVOAppliance/hf_upgrade.sh
chmod +x hf_upgrade.sh
# Download the upgraded packages
./hf_upgrade.sh --downloadonly
scp <tvo_packages_download_path>/* root@<TrilioVault_node_IP>:/<path_to_upgrade_package>/

systemctl status tvault-config wlm-workloads wlm-api wlm-scheduler
pcs status (on primary node)
systemctl status wlm-cron (on primary node)
systemctl status tvault-object-store (only if Trilio configured with S3 backend storage)

ps -ef | grep workloadmgr-cron | grep -v grep
# Above command should show only 2 processes running; sample below
[root@tvm6 ~]# ps -ef | grep workloadmgr-cron | grep -v grep
nova 8841 1 2 Jul28 ? 00:40:44 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-cron --config-file=/etc/workloadmgr/workloadmgr.conf
nova 8898 8841 0 Jul28 ? 00:07:03 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-cron --config-file=/etc/workloadmgr/workloadmgr.conf

cd <path_to_upgrade_package>/
./hf_upgrade.sh --installonly
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Mon, 09 Nov 2020 13:23:25 GMT
Content-Type: application/json
Content-Length: 244
Connection: keep-alive
X-Compute-Request-Id: req-bdfd3fb8-5cbf-4108-885f-63160426b2fa
{
"file_search":{
"created_at":"2020-11-09T13:23:25.698534",
"updated_at":null,
"id":14,
"deleted_at":null,
"status":"executing",
juju export-bundle --filename openstack_base_file.yaml

juju deploy --dry-run ./openstack_base_file.yaml --overlay <Trilio bundle path>

juju deploy ./openstack_base_file.yaml --overlay <Trilio bundle path>

juju run-action --wait trilio-wlm/leader create-cloud-admin-trust password=<openstack admin password>

juju run --wait trilio-wlm/leader create-cloud-admin-trust password=<openstack admin password>

juju attach-resource trilio-wlm license=<Path to trilio license file>

juju run-action --wait trilio-wlm/leader create-license

juju run --wait trilio-wlm/leader create-license

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Mon, 09 Nov 2020 13:24:28 GMT
Content-Type: application/json
Content-Length: 819
Connection: keep-alive
X-Compute-Request-Id: req-d57bea9a-9968-4357-8743-e0b906466063
{
"file_search":{
"created_at":"2020-11-09T13:23:25.000000",
"updated_at":"2020-11-09T13:23:48.000000",
"id":14,
"deleted_at":null,
"status":"completed",
"error_msg":null,
"filepath":"/etc/h*",
"json_resp":"[
{
"ed4f29e8-7544-4e1c-af8a-a76031211926":[
{
"/dev/vda1":[
"/etc/hostname",
"/etc/hosts"
],
"/etc/hostname":{
"dev":"2049",
"ino":"32",
"mode":"33204",
"nlink":"1",
"uid":"0",
"gid":"0",
"rdev":"0",
"size":"1",
"blksize":"1024",
"blocks":"2",
"atime":"1603455255",
"mtime":"1603455255",
"ctime":"1603455255"
},
"/etc/hosts":{
"dev":"2049",
"ino":"127",
"mode":"33204",
"nlink":"1",
"uid":"0",
"gid":"0",
"rdev":"0",
"size":"37",
"blksize":"1024",
"blocks":"2",
"atime":"1603455257",
"mtime":"1431011050",
"ctime":"1431017172"
}
}
]
}
]",
"vm_id":"08dab61c-6efd-44d3-a9ed-8e789d338c1b"
}
}

{
"file_search":{
"start":<Integer>,
"end":<Integer>,
"filepath":"<Reg-Ex String>",
"date_from":<Date Format: YYYY-MM-DDTHH:MM:SS>,
"date_to":<Date Format: YYYY-MM-DDTHH:MM:SS>,
"snapshot_ids":[
"<Snapshot-ID>"
],
"vm_id":"<VM-ID>"
}
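As a filled-in illustration of the file_search request body schema above, the following minimal sketch writes an example body to a file. The start/end values and the file name are assumptions; the filepath, snapshot ID, and VM ID are copied from the sample response shown earlier.

cat > file_search_body.json <<'EOF'
{
  "file_search": {
    "start": 0,
    "end": 100,
    "filepath": "/etc/h*",
    "snapshot_ids": [
      "ed4f29e8-7544-4e1c-af8a-a76031211926"
    ],
    "vm_id": "08dab61c-6efd-44d3-a9ed-8e789d338c1b"
  }
}
EOF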
--policy-fields <key=key-name> ➡️ Specify the following key/value pairs for policy fields. Specify the option multiple times to include multiple keys.
'interval' : '1 hr'
'retention_policy_type' : 'Number of Snapshots to Keep' or 'Number of days to retain Snapshots'
'retention_policy_value' : '30'
'fullbackup_interval' : '-1' (Enter the number of incremental snapshots between full backups, from 1 to 999; '-1' for 'NEVER' and '0' for 'ALWAYS')
For example: --policy-fields interval='1 hr' --policy-fields retention_policy_type='Number of Snapshots to Keep' --policy-fields retention_policy_value='30' --policy-fields fullbackup_interval='2'

<policy_id> ➡️ policy to be assigned or removed

/etc/kolla/passwords.yml ➡️ Trilio entries have been appended at the end of the file

workloadmgr policy-list

workloadmgr policy-show <policy_id>

workloadmgr policy-create --policy-fields <key=key-name>
[--display-description <display_description>]
[--metadata <key=key-name>]
<display_name>

workloadmgr policy-update [--display-name <display-name>]
[--display-description <display-description>]
[--policy-fields <key=key-name>]
[--metadata <key=key-name>]
<policy_id>

workloadmgr policy-assign [--add_project <project_id>]
[--remove_project <project_id>]
<policy_id>

workloadmgr policy-delete <policy_id>

docker stop triliovault_datamover_api

docker rm triliovault_datamover_api

rm -rf /etc/kolla/triliovault-datamover-api

rm -rf /var/log/kolla/triliovault-datamover-api/

docker stop triliovault_datamover

docker rm triliovault_datamover

rm -rf /etc/kolla/triliovault-datamover

rm -rf /var/log/kolla/triliovault-datamover/

rm /etc/kolla/haproxy/services.d/triliovault-datamover-api.cfg
docker restart haproxy

kolla-ansible -i multinode deploy

openstack service delete dmapi
openstack user delete dmapi

mysql -u root -p

DROP DATABASE dmapi;
DROP USER dmapi;

virsh list

virsh destroy <Trilio VM Name or ID>

virsh undefine <Trilio VM name>

#check current mount point
[root@compute ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 7.8G 0 7.8G 0% /dev
tmpfs 7.8G 0 7.8G 0% /dev/shm
tmpfs 7.8G 26M 7.8G 1% /run
tmpfs 7.8G 0 7.8G 0% /sys/fs/cgroup
/dev/mapper/cl-root 280G 12G 269G 5% /
/dev/sda1 976M 197M 713M 22% /boot
192.168.1.34:/mnt/tvault/42436 2.5T 1005G 1.5T 41% /var/trilio/triliovault-mounts/MTkyLjE2OC4xLjM0Oi9tbnQvdHZhdWx0LzQyNDM2
#Stop triliovault_datamover
[root@compute ~]# docker stop triliovault_datamover
triliovault_datamover
[root@compute ~]#
#Delete triliovault_datamover
[root@compute ~]# docker rm triliovault_datamover
triliovault_datamover
[root@compute ~]#
#unmount mount point
[root@compute ~]# umount /var/trilio/triliovault-mounts/MTkyLjE2OC4xLjM0Oi9tbnQvdHZhdWx0LzQyNDM2
#check mount point is unmounted successfully
[root@compute ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 7.8G 0 7.8G 0% /dev
tmpfs 7.8G 0 7.8G 0% /dev/shm
tmpfs 7.8G 26M 7.8G 1% /run
tmpfs 7.8G 0 7.8G 0% /sys/fs/cgroup
/dev/mapper/cl-root 280G 12G 269G 5% /
/dev/sda1 976M 197M 713M 22% /boot
[root@compute ~]#
#Delete mounted dir from compute node
[root@compute trilio]# rm -rf /var/trilio/triliovault-mounts/MTkyLjE2OC4xLjM0Oi9tbnQvdHZhdWx0LzQyNDM2

root@controller:~# kolla-ansible -i multinode deploy

##Check that all Containers are up and running
#Controller node
root@controller:~# docker ps -a | grep trilio
583b8d42ab42 trilio/ubuntu-binary-trilio-datamover-api:4.1.36-ussuri "dumb-init --single-…" 3 days ago Up 3 days openstack-nova-api-triliodata-plugin
3be25d3819ac trilio/ubuntu-binary-trilio-horizon-plugin:4.1.36-ussuri "dumb-init --single-…" 4 days ago Up 4 days horizon
#Compute node
root@compute:~# docker ps -a | grep trilio
bf52face23fb trilio/ubuntu-binary-trilio-datamover:4.1.36-ussuri "dumb-init --single-…" 3 days ago Up 3 days trilio-datamover
## Verify the backup target has been changed successfully
# In case of switch to NFS
[root@compute ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 7.8G 0 7.8G 0% /dev
tmpfs 7.8G 0 7.8G 0% /dev/shm
tmpfs 7.8G 26M 7.8G 1% /run
tmpfs 7.8G 0 7.8G 0% /sys/fs/cgroup
/dev/mapper/cl-root 280G 12G 269G 5% /
/dev/sda1 976M 197M 713M 22% /boot
192.168.1.34:/mnt/tvault/42436 2.5T 1005G 1.5T 41% /var/trilio/triliovault-mounts/MTkyLjE2OC4xLjM0Oi9tbnQvdHZhdWx0LzQyNDM2
#In case of switch to S3
[root@compute ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 7.8G 0 7.8G 0% /dev
tmpfs 7.8G 0 7.8G 0% /dev/shm
tmpfs 7.8G 34M 7.8G 1% /run
tmpfs 7.8G 0 7.8G 0% /sys/fs/cgroup
/dev/mapper/cl-root 280G 12G 269G 5% /
/dev/sda1 976M 197M 713M 22% /boot
Trilio - - 0.0K - /var/trilio/triliovault-mounts
##Reverify in the triliovault_datamover containers
[root@compute ~]# docker exec -it triliovault_datamover bash
(triliovault-datamover)[nova@compute /]$ df -h
Filesystem Size Used Avail Use% Mounted on
overlay 280G 12G 269G 5% /
tmpfs 7.8G 0 7.8G 0% /sys/fs/cgroup
devtmpfs 7.8G 0 7.8G 0% /dev
tmpfs 7.8G 0 7.8G 0% /dev/shm
/dev/mapper/cl-root 280G 12G 269G 5% /etc/iscsi
tmpfs 6.3G 0 6.3G 0% /var/triliovault/tmpfs
Trilio - - 0.0K - /var/trilio/triliovault-mounts

Learn about upgrading Trilio on Canonical OpenStack

'sudo systemctl stop wlm-cron'

Check the juju status [-m <model>] | grep trilio output. All the trilio units will be running with the new packages.

Learn about Trilio Support for OpenStack Distributions
workloadmgr project-quota-type-list

workloadmgr project-quota-type-show <quota_type_id>

workloadmgr project-allowed-quota-create --quota-type-id quota_type_id
--allowed-value allowed_value
--high-watermark high_watermark
--project-id project_id

workloadmgr project-allowed-quota-list <project_id>

workloadmgr project-allowed-quota-show <allowed_quota_id>

workloadmgr project-allowed-quota-update [--allowed-value <allowed_value>]
[--high-watermark <high_watermark>]
[--project-id <project_id>]
<allowed_quota_id>

workloadmgr project-allowed-quota-delete <allowed_quota_id>

--date_from <date_from> ➡️ From date in format 'YYYY-MM-DDTHH:MM:SS', e.g. 2016-10-10T00:00:00. If no time is specified, 00:00 is taken by default.

--display-name <display-name> ➡️ Optional snapshot name. (Default=None)

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 11 Nov 2020 15:29:03 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-9d779802-9c65-463a-973c-39cdffcba82e

workloadmgr snapshot-list [--workload_id <workload_id>]
[--tvault_node <host>]
[--date_from <date_from>]
[--date_to <date_to>]
[--all {True,False}]

workloadmgr workload-snapshot [--full] [--display-name <display-name>]
[--display-description <display-description>]
<workload_id>

workloadmgr snapshot-show [--output <output>] <snapshot_id>

workloadmgr snapshot-delete <snapshot_id>

workloadmgr snapshot-cancel <snapshot_id>

systemctl restart wlm-api

systemctl restart wlm-scheduler

systemctl restart wlm-workloads

pcs resource restart wlm-cron

pcs resource restart lb_nginx-clone

[root@TVM1 ~]# rabbitmqctl stop
Stopping and halting node rabbit@TVM1 ...
[root@TVM1 ~]# rabbitmq-server -detached
Warning: PID file not written; -detached was passed.
[root@TVM1 ~]# rabbitmqctl cluster_status
Cluster status of node rabbit@TVM1 ...
[{nodes,[{disc,[rabbit@TVM1,rabbit@TVM2,rabbit@TVM3]}]},
{running_nodes,[rabbit@TVM2,rabbit@TVM3,rabbit@TVM1]},
{cluster_name,<<"rabbit@TVM1">>},
{partitions,[{rabbit@TVM2,[rabbit@TVM1,rabbit@TVM3]},
{rabbit@TVM3,[rabbit@TVM1,rabbit@TVM2]}]},
{alarms,[{rabbit@TVM2,[]},{rabbit@TVM3,[]},{rabbit@TVM1,[]}]}]

systemctl stop mysqld
systemctl start mysqld

systemctl stop mysqld

cat /var/lib/mysql/grastate.dat
# GALERA saved state
version: 2.1
uuid: 353e129f-11f2-11eb-b3f7-76f39b7b455d
seqno: 213576545367
safe_to_bootstrap: 1

galera_new_cluster

systemctl start mysqld

MariaDB [(none)]> show status like 'wsrep_incoming_addresses';
+--------------------------+-------------------------------------------------+
| Variable_name | Value |
+--------------------------+-------------------------------------------------+
| wsrep_incoming_addresses | 10.10.2.13:3306,10.10.2.14:3306,10.10.2.12:3306 |
+--------------------------+-------------------------------------------------+
1 row in set (0.01 sec)
MariaDB [(none)]> show status like 'wsrep_cluster_size';
+--------------------+-------+
| Variable_name | Value |
+--------------------+-------+
| wsrep_cluster_size | 3 |
+--------------------+-------+
1 row in set (0.00 sec)
MariaDB [(none)]> show status like 'wsrep_cluster_state_uuid';
+--------------------------+--------------------------------------+
| Variable_name | Value |
+--------------------------+--------------------------------------+
| wsrep_cluster_state_uuid | 353e129f-11f2-11eb-b3f7-76f39b7b455d |
+--------------------------+--------------------------------------+
1 row in set (0.00 sec)
MariaDB [(none)]> show status like 'wsrep_local_state_comment';
+---------------------------+--------+
| Variable_name | Value |
+---------------------------+--------+
| wsrep_local_state_comment | Synced |
+---------------------------+--------+
1 row in set (0.01 sec)

juju ssh <workloadmgr unit name>/<unit-number>
systemctl restart wlm-api wlm-scheduler wlm-workloads wlm-cron

juju ssh <workloadmgr unit name>/<unit-number>
systemctl restart wlm-api wlm-scheduler wlm-workloads

juju ssh <workloadmgr unit name>/<unit-number>
crm_resource --restart -r res_trilio_wlm_wlm_cron

docker restart trilio_dmapi

podman restart trilio_dmapi

juju ssh <trilio-dm-api unit name>/<unit-number>
sudo systemctl restart tvault-datamover-api

docker restart triliovault_datamover_api

lxc-stop -n <dmapi container name>
lxc-start -n <dmapi container name>

docker restart trilio_datamover

podman restart trilio_datamover

juju ssh <trilio-data-mover unit name>/<unit-number>
sudo systemctl restart tvault-contego

docker restart triliovault_datamover

service tvault-contego restart

{
"mount":{
"mount_vm_id":"15185195-cd8d-4f6f-95ca-25983a34ed92",
"options":{
}
}
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 11 Nov 2020 15:44:42 GMT
Content-Type: application/json
Content-Length: 228
Connection: keep-alive
X-Compute-Request-Id: req-04c6ef90-125c-4a36-9603-af1af001006a
{
"mounted_snapshots":[
{
"snapshot_id":"ed4f29e8-7544-4e1c-af8a-a76031211926",
"snapshot_name":"snapshot",
"workload_id":"4bafaa03-f69a-45d5-a6fc-ae0119c77974",
"mounturl":"[\"http://192.168.100.87\"]",
"status":"mounted"
}
]
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 11 Nov 2020 15:44:42 GMT
Content-Type: application/json
Content-Length: 228
Connection: keep-alive
X-Compute-Request-Id: req-04c6ef90-125c-4a36-9603-af1af001006a
{
"mounted_snapshots":[
{
"snapshot_id":"ed4f29e8-7544-4e1c-af8a-a76031211926",
"snapshot_name":"snapshot",
"workload_id":"4bafaa03-f69a-45d5-a6fc-ae0119c77974",
"mounturl":"[\"http://192.168.100.87\"]",
"status":"mounted"
}
]
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 11 Nov 2020 16:03:49 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-abf69be3-474d-4cf3-ab41-caa56bb611e4

{
"mount":
{
"options": null
}
}

juju exec [-m <model>] --unit trilio-wlm/leader "sudo crm configure property maintenance-mode=true"

juju exec [-m <model>] --application trilio-wlm "sudo systemctl stop wlm-cron"

juju exec [-m <model>] --application trilio-wlm "sudo ps -ef | grep [w]orkloadmgr-cron"

deb [trusted=yes] https://apt.fury.io/trilio-4-3/ /

juju exec [-m <model>] --application trilio-wlm 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" workloadmgr python3-workloadmgrclient python3-contegoclient s3-fuse-plugin'
juju exec [-m <model>] --application trilio-horizon-plugin 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" tvault-horizon-plugin python-workloadmgrclient'
juju exec [-m <model>] --application trilio-dm-api 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" python3-dmapi'
juju exec [-m <model>] --application trilio-data-mover 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" tvault-contego s3-fuse-plugin'

juju exec [-m <model>] --application trilio-wlm 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" workloadmgr python3-workloadmgrclient python3-contegoclient python3-s3-fuse-plugin'
juju exec [-m <model>] --application trilio-horizon-plugin 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" python3-tvault-horizon-plugin python3-workloadmgrclient'
juju exec [-m <model>] --application trilio-dm-api 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" python3-dmapi'
juju exec [-m <model>] --application trilio-data-mover 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" python3-tvault-contego python3-s3-fuse-plugin'

trilio-data-mover <package version> active 3 trilio-data-mover jujucharms 8 ubuntu
trilio-dm-api <package version> active 1 trilio-dm-api jujucharms 5 ubuntu
trilio-horizon-plugin <package version> active 1 trilio-horizon-plugin jujucharms 4 ubuntu
trilio-wlm <package version> active 3 trilio-wlm jujucharms 7 ubuntu

juju exec [-m <model>] --unit trilio-wlm/leader "alembic -c /etc/workloadmgr/alembic.ini upgrade heads"

juju exec [-m <model>] --unit trilio-wlm/leader "alembic -c /etc/workloadmgr/alembic.ini current"

juju exec [-m <model>] --application trilio-wlm "sudo systemctl stop wlm-cron"

juju exec [-m <model>] --unit trilio-wlm/leader "sudo crm configure property maintenance-mode=false"

juju exec [-m <model>] --application trilio-wlm "sudo systemctl status wlm-cron"

Login to the trilio unit and run "sudo dpkg --configure -a".
It will ask for user input; hit enter and log out from the unit.
From the MAAS node, run the command "juju resolve <trilio unit name>".

i. Before proceeding with the upgrade OR reinitialize, fetch the list of ALL workload IDs which are NOT in error OR deleted state from the database.
Query : select id from workloads where status not in ('deleted','error')
ii. Use IDs from this list to create the import CLI command parameters.
Sample : --workloadids <wl_id1> --workloadids <wl_id2> ... etc. The shell command below does the same;
wlIdList.txt should contain all workload IDs, one ID per line.
iii. awk '{print " --workloadids "$1}' wlIdList.txt | tr -d '\n'
iv. Append the output of the above command to the import command.
workloadmgr workload-importworkloads <Command output>
Eg : workloadmgr workload-importworkloads --workloadids ff24945f-7bef-498d-98eb-d727ec85bc7b --workloadids a15948b4-942c-47e2-85c5-06cad697010f

[root@controller ~]# wget http://10.10.2.15:8085/yum-repo/queens/workloadmgrclient-4.0.115-4.0.noarch.rpm
--2021-03-08 15:36:37-- http://10.10.2.15:8085/yum-repo/queens/workloadmgrclient-4.0.115-4.0.noarch.rpm
Connecting to 10.10.2.15:8085... connected.
HTTP request sent, awaiting response... 200 OK
Length: 155976 (152K) [application/x-rpm]
Saving to: ‘workloadmgrclient-4.0.115-4.0.noarch.rpm’
100%[======================================>] 1,55,976 --.-K/s in 0.001s
2021-03-08 15:36:37 (125 MB/s) - ‘workloadmgrclient-4.0.115-4.0.noarch.rpm’ saved [155976/155976]
[root@controller ~]# yum install workloadmgrclient-4.0.115-4.0.noarch.rpm
Loaded plugins: fastestmirror
Examining workloadmgrclient-4.0.115-4.0.noarch.rpm: workloadmgrclient-4.0.115-4. 0.noarch
Marking workloadmgrclient-4.0.115-4.0.noarch.rpm to be installed
Resolving Dependencies
--> Running transaction check
---> Package workloadmgrclient.noarch 0:4.0.115-4.0 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
Package Arch Version Repository Size
================================================================================
Installing:
workloadmgrclient
noarch 4.0.115-4.0 /workloadmgrclient-4.0.115-4.0.noarch 700 k
Transaction Summary
================================================================================
Install 1 Package
Total size: 700 k
Installed size: 700 k
Is this ok [y/d/N]: y
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : workloadmgrclient-4.0.115-4.0.noarch 1/1
Verifying : workloadmgrclient-4.0.115-4.0.noarch 1/1
Installed:
workloadmgrclient.noarch 0:4.0.115-4.0
Complete!

[trilio]
name=Trilio Repository
baseurl=http://trilio:[email protected]:8283/triliovault-<Trilio-Release>/yum/
enabled=1
gpgcheck=0

[root@controller ~]# cat /etc/yum.repos.d/trilio.repo
[trilio]
name=Trilio Repository
baseurl=http://trilio:[email protected]:8283/triliovault-4.0/yum/
enabled=1
gpgcheck=0
[root@controller ~]# yum install workloadmgrclient
Loaded plugins: fastestmirror
Determining fastest mirrors
* base: centos-canada.vdssunucu.com.tr
* centos-ceph-nautilus: mirror.its.dal.ca
* centos-nfs-ganesha28: centos.mirror.colo-serv.net
* centos-openstack-train: centos-canada.vdssunucu.com.tr
* centos-qemu-ev: centos-canada.vdssunucu.com.tr
* extras: centos-canada.vdssunucu.com.tr
* updates: centos-canada.vdssunucu.com.tr
base | 3.6 kB 00:00:00
centos-ceph-nautilus | 3.0 kB 00:00:00
centos-nfs-ganesha28 | 3.0 kB 00:00:00
centos-openstack-train | 3.0 kB 00:00:00
centos-qemu-ev | 3.0 kB 00:00:00
extras | 2.9 kB 00:00:00
trilio | 2.9 kB 00:00:00
updates | 2.9 kB 00:00:00
(1/3): extras/7/x86_64/primary_db | 225 kB 00:00:00
(2/3): centos-openstack-train/7/x86_64/primary_db | 1.1 MB 00:00:00
(3/3): updates/7/x86_64/primary_db | 5.7 MB 00:00:00
Resolving Dependencies
--> Running transaction check
---> Package workloadmgrclient.noarch 0:4.0.116-4.0 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
===========================================================================================================================================================================================================================================================================================================================================================================================================================================
Package Arch Version Repository Size
===========================================================================================================================================================================================================================================================================================================================================================================================================================================
Installing:
workloadmgrclient noarch 4.0.116-4.0 trilio 152 k
Transaction Summary
===========================================================================================================================================================================================================================================================================================================================================================================================================================================
Install 1 Package
Total download size: 152 k
Installed size: 700 k
Is this ok [y/d/N]: y
Downloading packages:
workloadmgrclient-4.0.116-4.0.noarch.rpm | 152 kB 00:00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : workloadmgrclient-4.0.116-4.0.noarch 1/1
Verifying : workloadmgrclient-4.0.116-4.0.noarch 1/1
Installed:
workloadmgrclient.noarch 0:4.0.116-4.0
Complete!

root@ubuntu:~# curl -Og6 http://10.10.2.15:8085/deb-repo/deb-repo/python3-workloadmgrclient_4.0.115_all.deb
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 116k 100 116k 0 0 899k 0 --:--:-- --:--:-- --:--:-- 982k
root@ubuntu:~# apt-get install ./python3-workloadmgrclient_4.0.115_all.deb -y
Reading package lists... Done
Building dependency tree
Reading state information... Done
Note, selecting 'python3-workloadmgrclient' instead of './python3-workloadmgrclient_4.0.115_all.deb'
The following NEW packages will be installed:
python3-workloadmgrclient
0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded.
Need to get 0 B/120 kB of archives.
After this operation, 736 kB of additional disk space will be used.
Selecting previously unselected package python3-workloadmgrclient.
(Reading database ... 65533 files and directories currently installed.)
Preparing to unpack .../python3-workloadmgrclient_4.0.115_all.deb ...
Unpacking python3-workloadmgrclient (4.0.115) ...
Setting up python3-workloadmgrclient (4.0.115) ...

deb [trusted=yes] https://apt.fury.io/triliodata-<Trilio-Version>/ /

root@ubuntu:~# cat /etc/apt/sources.list.d/fury.list
deb [trusted=yes] https://apt.fury.io/triliodata-4-0/ /
root@ubuntu:~# apt update
Hit:1 http://security.ubuntu.com/ubuntu bionic-security InRelease
Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease
Hit:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease
Hit:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease
Ign:5 https://apt.fury.io/triliodata-4-0 InRelease
Ign:6 https://apt.fury.io/triliodata-4-0 Release
Ign:7 https://apt.fury.io/triliodata-4-0 Packages
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Get:7 https://apt.fury.io/triliodata-4-0 Packages
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Fetched 84.0 kB in 12s (6930 B/s)
Reading package lists... Done
Building dependency tree
Reading state information... Done
All packages are up to date.
root@ubuntu:~# apt-get install python3-workloadmgrclient
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following NEW packages will be installed:
python3-workloadmgrclient
0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded.
Need to get 0 B/120 kB of archives.
After this operation, 736 kB of additional disk space will be used.
Selecting previously unselected package python3-workloadmgrclient.
(Reading database ... 65533 files and directories currently installed.)
Preparing to unpack .../python3-workloadmgrclient_4.0.115_all.deb ...
Unpacking python3-workloadmgrclient (4.0.115) ...
Setting up python3-workloadmgrclient (4.0.115) ...

systemctl | grep wlm
wlm-api.service loaded active running workloadmanager api service
wlm-cron.service loaded active running Cluster Controlled wlm-cron
wlm-scheduler.service loaded active running Cluster Controlled wlm-scheduler
wlm-workloads.service loaded active running workloadmanager workloads service

systemctl status wlm-api
######
● wlm-api.service - workloadmanager api service
Loaded: loaded (/etc/systemd/system/wlm-api.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2021-11-02 19:41:19 UTC; 2 months 21 days ago
Main PID: 4688 (workloadmgr-api)
CGroup: /system.slice/wlm-api.service
├─4688 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-api --config-file=/etc/workloadmgr/workloadmgr.conf

systemctl status wlm-scheduler
######
● wlm-scheduler.service - Cluster Controlled wlm-scheduler
Loaded: loaded (/etc/systemd/system/wlm-scheduler.service; disabled; vendor preset: disabled)
Drop-In: /run/systemd/system/wlm-scheduler.service.d
└─50-pacemaker.conf
Active: active (running) since Sat 2022-01-22 13:49:28 UTC; 1 day 23h ago
Main PID: 9342 (workloadmgr-sch)
CGroup: /system.slice/wlm-scheduler.service
└─9342 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-scheduler --config-file=/etc/workloadmgr/workloadmgr.conf

systemctl status wlm-workloads
######
● wlm-workloads.service - workloadmanager workloads service
Loaded: loaded (/etc/systemd/system/wlm-workloads.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2021-11-02 19:51:05 UTC; 2 months 21 days ago
Main PID: 606 (workloadmgr-wor)
CGroup: /system.slice/wlm-workloads.service
├─ 606 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-workloads --config-file=/etc/workloadmgr/workloadmgr.conf

systemctl status wlm-cron
######
● wlm-cron.service - Cluster Controlled wlm-cron
Loaded: loaded (/etc/systemd/system/wlm-cron.service; disabled; vendor preset: disabled)
Drop-In: /run/systemd/system/wlm-cron.service.d
└─50-pacemaker.conf
Active: active (running) since Sat 2022-01-22 13:49:28 UTC; 1 day 23h ago
Main PID: 9209 (workloadmgr-cro)
CGroup: /system.slice/wlm-cron.service
├─9209 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-cron --config-file=/etc/workloadmgr/workloadmgr.conf

pcs status
######
Cluster name: triliovault
WARNINGS:
Corosync and pacemaker node names do not match (IPs used in setup?)
Stack: corosync
Current DC: TVM1 (version 1.1.21-4.el7-f14e36fd43) - partition with quorum
Last updated: Mon Jan 24 13:42:01 2022
Last change: Tue Nov 2 19:07:04 2021 by root via crm_resource on TVM2
3 nodes configured
9 resources configured
Online: [ TVM1 TVM2 TVM3 ]
Full list of resources:
virtual_ip (ocf::heartbeat:IPaddr2): Started TVM2
virtual_ip_public (ocf::heartbeat:IPaddr2): Started TVM2
virtual_ip_admin (ocf::heartbeat:IPaddr2): Started TVM2
virtual_ip_internal (ocf::heartbeat:IPaddr2): Started TVM2
wlm-cron (systemd:wlm-cron): Started TVM2
wlm-scheduler (systemd:wlm-scheduler): Started TVM2
Clone Set: lb_nginx-clone [lb_nginx]
Started: [ TVM2 ]
Stopped: [ TVM1 TVM3 ]
Daemon Status:
corosync: active/enabled
pacemaker: active/enabled
pcsd: active/enabled

curl http://10.10.2.34:8780/v1/8e16700ae3614da4ba80a4e57d60cdb9/workload_types/detail -X GET -H "X-Auth-Project-Id: admin" -H "User-Agent: python-workloadmgrclient" -H "Accept: application/json" -H "X-Auth-Token: gAAAAABe40NVFEtJeePpk1F9QGGh1LiGnHJVLlgZx9t0HRrK9rC5vqKZJRkpAcW1oPH6Q9K9peuHiQrBHEs1-g75Na4xOEESR0LmQJUZP6n37fLfDL_D-hlnjHJZ68iNisIP1fkm9FGSyoyt6IqjO9E7_YVRCTCqNLJ67ZkqHuJh1CXwShvjvjw

root@ansible:~# openstack endpoint list | grep dmapi
| 190db2ce033e44f89de73abcbf12804e | US-WEST-2 | dmapi | datamover | True | public | https://osa-victoria-ubuntu20-2.triliodata.demo:8784/v2 |
| dec1a323791b49f0ac7901a2dc806ee2 | US-WEST-2 | dmapi | datamover | True | admin | http://10.10.10.154:8784/v2 |
| f8c4162c9c1246ffb0190d0d093c48af | US-WEST-2 | dmapi | datamover | True | internal | http://10.10.10.154:8784/v2 |
root@ansible:~# curl http://10.10.10.154:8784
{"versions": [{"id": "v2.0", "status": "SUPPORTED", "version": "", "min_version": "", "updated": "2011-01-21T11:33:21Z", "links": [{"rel": "self", "href": "hlxc-attach -n <dmapi-container-name> (go to dmapi conatiner)
root@controller-dmapi-container-08df1e06:~# systemctl status tvault-datamover-api.service
● tvault-datamover-api.service - TrilioData DataMover API service
Loaded: loaded (/lib/systemd/system/tvault-datamover-api.service; enabled; vendor preset: enabled)
Active: active (running) since Wed 2022-01-12 11:53:39 UTC; 1 day 17h ago
Main PID: 23888 (dmapi-api)
Tasks: 289 (limit: 57729)
Memory: 607.7M
CGroup: /system.slice/tvault-datamover-api.service
├─23888 /usr/bin/python3 /usr/bin/dmapi-api
├─23893 /usr/bin/python3 /usr/bin/dmapi-api
├─23894 /usr/bin/python3 /usr/bin/dmapi-api
├─23895 /usr/bin/python3 /usr/bin/dmapi-api
├─23896 /usr/bin/python3 /usr/bin/dmapi-api
├─23897 /usr/bin/python3 /usr/bin/dmapi-api
├─23898 /usr/bin/python3 /usr/bin/dmapi-api
├─23899 /usr/bin/python3 /usr/bin/dmapi-api
├─23900 /usr/bin/python3 /usr/bin/dmapi-api
├─23901 /usr/bin/python3 /usr/bin/dmapi-api
├─23902 /usr/bin/python3 /usr/bin/dmapi-api
├─23903 /usr/bin/python3 /usr/bin/dmapi-api
├─23904 /usr/bin/python3 /usr/bin/dmapi-api
├─23905 /usr/bin/python3 /usr/bin/dmapi-api
├─23906 /usr/bin/python3 /usr/bin/dmapi-api
├─23907 /usr/bin/python3 /usr/bin/dmapi-api
└─23908 /usr/bin/python3 /usr/bin/dmapi-api
Jan 12 11:53:39 controller-dmapi-container-08df1e06 systemd[1]: Started TrilioData DataMover API service.
Jan 12 11:53:40 controller-dmapi-container-08df1e06 dmapi-api[23888]: Could not load
Jan 12 11:53:40 controller-dmapi-container-08df1e06 dmapi-api[23888]: Could not load
root@compute:~# systemctl status tvault-contego
● tvault-contego.service - Tvault contego
Loaded: loaded (/etc/systemd/system/tvault-contego.service; enabled; vendor preset: enabled)
Active: active (running) since Fri 2022-01-14 05:45:19 UTC; 2s ago
Main PID: 1489651 (python3)
Tasks: 19 (limit: 67404)
Memory: 6.7G (max: 10.0G)
CGroup: /system.slice/tvault-contego.service
├─ 998543 /bin/qemu-nbd -c /dev/nbd45 --object secret,id=sec0,data=payload-1234 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,file>
├─ 998772 /bin/qemu-nbd -c /dev/nbd73 --object secret,id=sec0,data=payload-1234 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,file>
├─ 998931 /bin/qemu-nbd -c /dev/nbd100 --object secret,id=sec0,data=payload-1234 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,fil>
├─ 999147 /bin/qemu-nbd -c /dev/nbd35 --object secret,id=sec0,data=payload-1234 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,file>
├─1371322 /bin/qemu-nbd -c /dev/nbd63 --object secret,id=sec0,data=payload-test1 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,fil>
├─1371524 /bin/qemu-nbd -c /dev/nbd91 --object secret,id=sec0,data=payload-test1 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,fil>
└─1489651 /openstack/venvs/nova-22.3.1/bin/python3 /usr/bin/tvault-contego --config-file=/etc/nova/nova.conf --config-file=/etc/tvault-contego/tvault-cont>
Jan 14 05:45:19 compute systemd[1]: Started Tvault contego.
Jan 14 05:45:20 compute sudo[1489653]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/openstack/venvs/nova-22.3.1/bin/nova-rootwrap /etc/nova/rootwrap.conf umou>
Jan 14 05:45:20 compute sudo[1489653]: pam_unix(sudo:session): session opened for user root by (uid=0)
Jan 14 05:45:21 compute python3[1489655]: umount: /var/triliovault-mounts/VHJpbGlvVmF1bHQ=: no mount point specified.
Jan 14 05:45:21 compute sudo[1489653]: pam_unix(sudo:session): session closed for user root
Jan 14 05:45:21 compute tvault-contego[1489651]: 2022-01-14 05:45:21.499 1489651 INFO __main__ [req-48c32a39-38d0-45b9-9852-931e989133c6 - - - - -] CPU Control group m>
Jan 14 05:45:21 compute tvault-contego[1489651]: 2022-01-14 05:45:21.499 1489651 INFO __main__ [req-48c32a39-38d0-45b9-9852-931e989133c6 - - - - -] I/O Control Group m>
lines 1-22/22 (END)
root@ansible:~# openstack endpoint list | grep dmapi
| 190db2ce033e44f89de73abcbf12804e | US-WEST-2 | dmapi | datamover | True | public | https://osa-victoria-ubuntu20-2.triliodata.demo:8784/v2 |
| dec1a323791b49f0ac7901a2dc806ee2 | US-WEST-2 | dmapi | datamover | True | admin | http://10.10.10.154:8784/v2 |
| f8c4162c9c1246ffb0190d0d093c48af | US-WEST-2 | dmapi | datamover | True | internal | http://10.10.10.154:8784/v2 |
root@ansible:~# curl http://10.10.10.154:8784
{"versions": [{"id": "v2.0", "status": "SUPPORTED", "version": "", "min_version": "", "updated": "2011-01-21T11:33:21Z", "links": [{"rel": "self", "href": "h[root@controller ~]# docker ps | grep triliovault_datamover_api
3f979c15cedc trilio/centos-binary-trilio-datamover-api:4.2.50-victoria "dumb-init --single-…" 3 days ago Up 3 days triliovault_datamover_api
[root@compute1 ~]# docker ps | grep triliovault_datamover
2f1ece820a59 trilio/centos-binary-trilio-datamover:4.2.50-victoria "dumb-init --single-…" 3 days ago Up 3 days triliovault_datamover
[root@controller ~]# docker ps | grep horizon
4a004c786d47 trilio/centos-binary-trilio-horizon-plugin:4.2.50-victoria "dumb-init --single-…" 3 days ago Up 3 days (unhealthy) horizon
root@jujumaas:~# juju status | grep trilio
trilio-data-mover 4.2.51 active 3 trilio-data-mover jujucharms 9 ubuntu
trilio-dm-api 4.2.51 active 1 trilio-dm-api jujucharms 7 ubuntu
trilio-horizon-plugin 4.2.51 active 1 trilio-horizon-plugin jujucharms 6 ubuntu
trilio-wlm 4.2.51 active 1 trilio-wlm jujucharms 9 ubuntu
trilio-data-mover/8 active idle 172.17.1.5 Unit is ready
trilio-data-mover/6 active idle 172.17.1.6 Unit is ready
trilio-data-mover/7* active idle 172.17.1.7 Unit is ready
trilio-horizon-plugin/2* active idle 172.17.1.16 Unit is ready
trilio-dm-api/2* active idle 1/lxd/4 172.17.1.27 8784/tcp Unit is ready
trilio-wlm/2* active idle 7 172.17.1.28 8780/tcp Unit is ready
On rhosp13 OS: docker ps | grep trilio-
On other (rhosp16 onwards/tripleo) : podman ps | grep trilio-
[root@overcloudtrain1-controller-0 heat-admin]# podman ps | grep trilio-
e3530d6f7bec ucqa161.ctlplane.trilio.local:8787/trilio/trilio-datamover-api:4.2.47-rhosp16.1 kolla_start 2 weeks ago Up 2 weeks ago trilio_dmapi
f93f7019f934 ucqa161.ctlplane.trilio.local:8787/trilio/trilio-horizon-plugin:4.2.47-rhosp16.1 kolla_start 2 weeks ago Up 2 weeks ago horizon
On rhosp13 OS: docker ps | grep trilio-
On other (rhosp/tripleo) : podman ps | grep trilio-
[root@overcloudtrain3-novacompute-1 heat-admin]# podman ps | grep trilio-
4419b02e075c undercloud162.ctlplane.trilio.local:8787/trilio/trilio-datamover:dev-osp16.2-1-rhosp16.2 kolla_start 2 days ago Up 27 seconds ago trilio_datamover
(overcloudtrain1) [stack@ucqa161 ~]$ openstack endpoint list | grep datamover
| 218b2f92569a4d259839fa3ea4d6103a | regionOne | dmapi | datamover | True | internal | https://overcloudtrain1internalapi.trilio.local:8784/v2 |
| 4702c51aa5c24bed853e736499e194e2 | regionOne | dmapi | datamover | True | public | https://overcloudtrain1.trilio.local:13784/v2 |
| c8169025eb1e4954ab98c7abdb0f53f6 | regionOne | dmapi | datamover | True | admin | https://overcloudtrain1internalapi.trilio.local:8784/v2 |
systemctl | grep wlm
wlm-api.service loaded active running workloadmanager api service
wlm-cron.service loaded active running Cluster Controlled wlm-cron
wlm-scheduler.service loaded active running Cluster Controlled wlm-scheduler
wlm-workloads.service loaded active running workloadmanager workloads service
systemctl status wlm-api
######
● wlm-api.service - workloadmanager api service
Loaded: loaded (/etc/systemd/system/wlm-api.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2021-11-02 19:41:19 UTC; 2 months 21 days ago
Main PID: 4688 (workloadmgr-api)
CGroup: /system.slice/wlm-api.service
├─4688 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-api --config-file=/etc/workloadmgr/workloadmgr.conf
systemctl status wlm-scheduler
######
● wlm-scheduler.service - Cluster Controlled wlm-scheduler
Loaded: loaded (/etc/systemd/system/wlm-scheduler.service; disabled; vendor preset: disabled)
Drop-In: /run/systemd/system/wlm-scheduler.service.d
└─50-pacemaker.conf
Active: active (running) since Sat 2022-01-22 13:49:28 UTC; 1 day 23h ago
Main PID: 9342 (workloadmgr-sch)
CGroup: /system.slice/wlm-scheduler.service
└─9342 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-scheduler --config-file=/etc/workloadmgr/workloadmgr.conf
systemctl status wlm-workloads
######
● wlm-workloads.service - workloadmanager workloads service
Loaded: loaded (/etc/systemd/system/wlm-workloads.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2021-11-02 19:51:05 UTC; 2 months 21 days ago
Main PID: 606 (workloadmgr-wor)
CGroup: /system.slice/wlm-workloads.service
├─ 606 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-workloads --config-file=/etc/workloadmgr/workloadmgr.conf
systemctl status wlm-cron
######
● wlm-cron.service - Cluster Controlled wlm-cron
Loaded: loaded (/etc/systemd/system/wlm-cron.service; disabled; vendor preset: disabled)
Drop-In: /run/systemd/system/wlm-cron.service.d
└─50-pacemaker.conf
Active: active (running) since Sat 2022-01-22 13:49:28 UTC; 1 day 23h ago
Main PID: 9209 (workloadmgr-cro)
CGroup: /system.slice/wlm-cron.service
├─9209 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-cron --config-file=/etc/workloadmgr/workloadmgr.conf
pcs status
######
Cluster name: triliovault
WARNINGS:
Corosync and pacemaker node names do not match (IPs used in setup?)
Stack: corosync
Current DC: TVM1 (version 1.1.21-4.el7-f14e36fd43) - partition with quorum
Last updated: Mon Jan 24 13:42:01 2022
Last change: Tue Nov 2 19:07:04 2021 by root via crm_resource on TVM2
3 nodes configured
9 resources configured
Online: [ TVM1 TVM2 TVM3 ]
Full list of resources:
virtual_ip (ocf::heartbeat:IPaddr2): Started TVM2
virtual_ip_public (ocf::heartbeat:IPaddr2): Started TVM2
virtual_ip_admin (ocf::heartbeat:IPaddr2): Started TVM2
virtual_ip_internal (ocf::heartbeat:IPaddr2): Started TVM2
wlm-cron (systemd:wlm-cron): Started TVM2
wlm-scheduler (systemd:wlm-scheduler): Started TVM2
Clone Set: lb_nginx-clone [lb_nginx]
Started: [ TVM2 ]
Stopped: [ TVM1 TVM3 ]
Daemon Status:
corosync: active/enabled
pacemaker: active/enabled
pcsd: active/enabled
Learn about configuring Trilio for OpenStack
openstack image create \
--file <File Manager Image Path> \
--container-format bare \
--disk-format qcow2 \
--public \
--property hw_qemu_guest_agent=yes \
--property tvault_recovery_manager=yes \
--property hw_disk_bus=virtio \
tvault-file-manager
guest-file-read
guest-file-write
guest-file-open
guest-file-close
SELINUX=disabled
yum install python3 lvm2
apt-get update
apt-get install qemu-guest-agent
systemctl enable qemu-guest-agent
Loaded: loaded (/etc/init.d/qemu-guest-agent; generated)
DAEMON_ARGS="-F/etc/qemu/fsfreeze-hook"
Loaded: loaded (/usr/lib/systemd/system/qemu-guest-agent.service; disabled; vendor preset: enabled)
systemctl edit qemu-guest-agent
[Service]
ExecStart=
ExecStart=/usr/sbin/qemu-ga -F/etc/qemu/fsfreeze-hook
systemctl restart qemu-guest-agent
apt-get install python3
workloadmgr snapshot-mount <snapshot_id> <mount_vm_id>
workloadmgr snapshot-mounted-list [--workloadid <workloadid>]
workloadmgr snapshot-dismount <snapshot_id>
tvault-horizon-plugin package in the required python version
tls-everywhere-endpoints-dns.yaml
i. Before proceeding for upgrade OR reinitialize, fetch the list of ALL workload IDs which are NOT in error OR deleted state from the database
Query : select id from workloads where status not in ('deleted','error')
ii. Use IDs from this list to create the import CLI command parameters.
Sample : --workloadids <wl_id1> --workloadids <wl_id2> …. etc. The shell command below generates this string.
wlIdList.txt should contain all workload IDs, one ID per line.
iii. awk '{print " --workloadids "$1}' wlIdList.txt | tr -d '\n'
iv. Append the output of the above command to the import command.
workloadmgr workload-importworkloads <Command output>
Eg : workloadmgr workload-importworkloads --workloadids ff24945f-7bef-498d-98eb-d727ec85bc7b --workloadids a15948b4-942c-47e2-85c5-06cad697010f
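The awk output can also be fed to the import command directly via command substitution (a convenience sketch; it assumes wlIdList.txt already contains one workload ID per line as described above):
workloadmgr workload-importworkloads $(awk '{print " --workloadids "$1}' wlIdList.txt | tr -d '\n')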
[root@TVM2 ~]# pcs resource disable wlm-cron
[root@TVM2 ~]# systemctl status wlm-cron
● wlm-cron.service - workload's scheduler cron service
Loaded: loaded (/etc/systemd/system/wlm-cron.service; disabled; vendor preset : disabled)
Active: inactive (dead)
Jun 11 08:27:06 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:06 - INFO - 1...t
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 140686268624368 Child 11389 ki...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - 1...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Shutting down thread pool
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...l
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Stopping the threads
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...s
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: All threads are stopped succes...y
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - A...y
Jun 11 08:27:09 TVM2 systemd[1]: Stopped workload's scheduler cron service.
Hint: Some lines were ellipsized, use -l to show in full.
[root@TVM2 ~]# pcs resource show wlm-cron
Resource: wlm-cron (class=systemd type=wlm-cron)
Meta Attrs: target-role=Stopped
Operations: monitor interval=30s on-fail=restart timeout=300s (wlm-cron-monito r-interval-30s)
start interval=0s on-fail=restart timeout=300s (wlm-cron-start-int erval-0s)
stop interval=0s timeout=300s (wlm-cron-stop-interval-0s)
[root@TVM2 ~]# ps -ef | grep -i workloadmgr-cron
root 15379 14383 0 08:27 pts/0 00:00:00 grep --color=auto -i workloadmgr -cron
deb [trusted=yes] https://apt.fury.io/triliodata-4-2/ /
apt-get update
apt list --upgradable
[triliovault-4-2]
name=triliovault-4-2
baseurl=http://trilio:[email protected]:8283/triliodata-4-2/yum/
gpgcheck=0
enabled=1
yum repolist
yum check-upgrade
lxc-ls # grep container name for dmapi service
lxc-attach -n <dmapi container name>
tar -czvf dmapi_config.tar.gz /etc/dmapi
apt list --upgradable
apt install python3-dmapi --upgrade
tar -xzvf dmapi_config.tar.gz -C /
systemctl restart tvault-datamover-api
systemctl status tvault-datamover-api
lxc-ls # grep container name for dmapi service
lxc-attach -n <dmapi container name>
tar -czvf dmapi_config.tar.gz /etc/dmapi
yum list installed | grep dmapi ##use dnf if yum not available
yum check-update python3-dmapi ##use dnf if yum not available
yum upgrade python3-dmapi ##use dnf if yum not available
tar -xzvf dmapi_config.tar.gz -C /
systemctl restart tvault-datamover-api
systemctl status tvault-datamover-api
lxc-attach -n controller_horizon_container-ead7cc60
apt list --upgradable
apt install python3-tvault-horizon-plugin --upgrade
apt install python3-workloadmgrclient --upgrade
apt install python3-contegoclient --upgrade
systemctl restart apache2
workloadmgr --version
lxc-attach -n controller_horizon_container-ead7cc60
yum list installed | grep trilio ##use dnf if yum not available
yum upgrade python3-contegoclient-el8 python3-tvault-horizon-plugin-el8 python3-workloadmgrclient-el8 ##use dnf if yum not available
systemctl restart httpd
workloadmgr --version
tar -czvf contego_config.tar.gz /etc/tvault-contego/
[root@compute ~]# df -h
df: /var/trilio/triliovault-mounts: Transport endpoint is not connected
Filesystem Size Used Avail Use% Mounted on
devtmpfs 28G 0 28G 0% /dev
tmpfs 28G 0 28G 0% /dev/shm
tmpfs 28G 928K 28G 1% /run
tmpfs 28G 0 28G 0% /sys/fs/cgroup
/dev/mapper/cl_centos8-root 70G 13G 58G 19% /
/dev/vda1 1014M 231M 784M 23% /boot
tmpfs 5.5G 0 5.5G 0% /run/user/0
172.25.0.10:/mnt/tvault/42436 2.5T 1.8T 674G 74% /var/triliovault-mounts/MTcyLjI1LjAuMTA6L21udC90dmF1bHQvNDI0MzY=
[root@compute ~]# umount /var/triliovault-mounts/MTcyLjI1LjAuMTA6L21udC90dmF1bHQvNDI0MzY=
apt install python3-tvault-contego --upgrade
apt install python3-s3-fuse-plugin --upgrade
yum upgrade python3-tvault-contego ##use dnf if yum not available
yum upgrade python3-s3fuse-plugin ##use dnf if yum not available
tar -xzvf contego_config.tar.gz -C /
systemctl restart tvault-contego
systemctl status tvault-contego
#To check if backend storage got mounted successfully
df -h
tar -czvf contego_config.tar.gz /etc/tvault-contego/ /etc/tvault-object-store/
e.g. root@compute:~# df -h
Filesystem Size Used Avail Use% Mounted on
udev 28G 0 28G 0% /dev
tmpfs 5.5G 1.4M 5.5G 1% /run
/dev/vda3 124G 16G 102G 13% /
tmpfs 28G 20K 28G 1% /dev/shm
tmpfs 5.0M 0 5.0M 0% /run/lock
tmpfs 28G 0 28G 0% /sys/fs/cgroup
/dev/vda1 456M 297M 126M 71% /boot
tmpfs 6.3G 0 6.3G 0% /var/triliovault/tmpfs
Trilio - - 0.0K - /var/triliovault-mounts
tmpfs 5.5G 0 5.5G 0% /run/user/0
[root@compute ~]# umount /var/triliovault-mounts
apt install python3-tvault-contego --upgrade
apt install python3-s3-fuse-plugin --upgrade
yum upgrade python3-tvault-contego ##use dnf if yum not available
yum upgrade python3-s3fuse-plugin ##use dnf if yum not available
tar -xzvf contego_config.tar.gz -C /
systemctl restart tvault-contego
systemctl status tvault-contego
#To check if backend storage got mounted successfully
df -h
retries 5
timeout http-request 10m
timeout queue 10m
timeout connect 10m
timeout client 10m
timeout server 10m
timeout check 10m
balance roundrobin
maxconn 50000
haproxy_extra_services:
- service:
haproxy_service_name: datamover_service
haproxy_backend_nodes: "{{ groups['dmapi_all'] | default([]) }}"
haproxy_ssl: "{{ haproxy_ssl }}"
haproxy_port: 8784
haproxy_balance_type: http
haproxy_backend_options:
- "httpchk GET / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"# Datamover haproxy setting
haproxy_extra_services:
- service:
haproxy_service_name: datamover_service
haproxy_backend_nodes: "{{ groups['dmapi_all'] | default([]) }}"
haproxy_ssl: "{{ haproxy_ssl }}"
haproxy_port: 8784
haproxy_balance_type: http
haproxy_balance_alg: roundrobin
haproxy_timeout_client: 10m
haproxy_timeout_server: 10m
haproxy_backend_options:
- "httpchk GET / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"cd /opt/openstack-ansible/playbooks
openstack-ansible haproxy-install.yml
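After the playbook completes, the rendered HAProxy configuration can be sanity-checked on the haproxy node (a quick check, assuming the default /etc/haproxy/haproxy.cfg path; the datamover_service backend name comes from the settings above):
haproxy -c -V -f /etc/haproxy/haproxy.cfg        # validate the configuration
grep -A5 datamover_service /etc/haproxy/haproxy.cfg   # confirm the Datamover backend was rendered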
cd /home/stack
git clone -b 4.3.2 https://github.com/trilioData/triliovault-cfg-scripts.git
cd triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/
cp s3-cert.pem /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/puppet/trilio/files/
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/scripts/
chmod +x *.sh
./upload_puppet_module.sh
## Output of the above command looks like the following.
Creating tarball...
Tarball created.
Creating heat environment file: /home/stack/.tripleo/environments/puppet-modules-url.yaml
Uploading file to swift: /tmp/puppet-modules-8Qjya2X/puppet-modules.tar.gz
+-----------------------+---------------------+----------------------------------+
| object | container | etag |
+-----------------------+---------------------+----------------------------------+
| puppet-modules.tar.gz | overcloud-artifacts | 368951f6a4d39cfe53b5781797b133ad |
+-----------------------+---------------------+----------------------------------+
## Above command creates the following file.
ls -ll /home/stack/.tripleo/environments/puppet-modules-url.yaml
'OS::TripleO::Services::TrilioDatamoverApi'
'OS::TripleO::Services::TrilioDatamover'
'OS::TripleO::Services::TrilioHorizon'
Trilio Datamover container: docker.io/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION>-tripleo
Trilio Datamover Api Container: docker.io/trilio/tripleo-train-centos7-trilio-datamover-api:<HOTFIX-TAG-VERSION>-tripleo
Trilio horizon plugin: docker.io/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-tripleo
# For TripleO Train Centos7
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: docker.io/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION>-tripleo
DockerTrilioDmApiImage: docker.io/trilio/tripleo-train-centos7-trilio-datamover-api:<HOTFIX-TAG-VERSION>-tripleo
DockerHorizonImage: docker.io/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-tripleo
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/scripts/
sudo ./prepare_trilio_images.sh <undercloud_registry_hostname_or_ip> <OS_platform> <4.1-TRIPLEO-CONTAINER> <container_tool_available_on_undercloud>
Options OS_platform: [centos7]
Options container_tool_available_on_undercloud: [docker, podman]
## To get undercloud registry hostname/ip, we have two approaches. Use either one.
1. openstack tripleo container image list
2. find your 'containers-prepare-parameter.yaml' (from overcloud deploy command) and search for 'push_destination'
cat /home/stack/containers-prepare-parameter.yaml | grep push_destination
- push_destination: "undercloud.ctlplane.ooo.prod1:8787"
Here, 'undercloud.ctlplane.ooo.prod1' is the undercloud registry hostname. Use it in the command as shown in the following example.
# Command Example:
sudo ./prepare_trilio_images.sh undercloud.ctlplane.ooo.prod1 centos7 <HOTFIX-TAG-VERSION>-tripleo podman
## Verify changes
# For TripleO Train Centos7
$ grep '<HOTFIX-TAG-VERSION>-tripleo' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: prod1-undercloud.demo:8787/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION>-tripleo
DockerTrilioDmApiImage: prod1-undercloud.demo:8787/trilio/tripleo-train-centos7-trilio-datamover-api:<HOTFIX-TAG-VERSION>-tripleo
DockerHorizonImage: prod1-undercloud.demo:8787/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-tripleo
## For Centos7 Train
(undercloud) [stack@undercloud redhat-director-scripts/tripleo-train]$ openstack tripleo container image list | grep trilio
| docker://undercloud.ctlplane.localdomain:8787/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION>-tripleo | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/tripleo-train-centos7-trilio-datamover-api:<HOTFIX-TAG-VERSION>-tripleo | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-tripleo |
cd triliovault-cfg-scripts/common/
(undercloud) [stack@ucqa161 ~]$ openstack server list
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
| ID | Name | Status | Networks | Image | Flavor |
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
| 8c3d04ae-fcdd-431c-afa6-9a50f3cb2c0d | overcloudtrain1-controller-2 | ACTIVE | ctlplane=172.30.5.18 | overcloud-full | control |
| 103dfd3e-d073-4123-9223-b8cf8c7398fe | overcloudtrain1-controller-0 | ACTIVE | ctlplane=172.30.5.11 | overcloud-full | control |
| a3541849-2e9b-4aa0-9fa9-91e7d24f0149 | overcloudtrain1-controller-1 | ACTIVE | ctlplane=172.30.5.25 | overcloud-full | control |
| 74a9f530-0c7b-49c4-9a1f-87e7eeda91c0 | overcloudtrain1-novacompute-0 | ACTIVE | ctlplane=172.30.5.30 | overcloud-full | compute |
| c1664ac3-7d9c-4a36-b375-0e4ee19e93e4 | overcloudtrain1-novacompute-1 | ACTIVE | ctlplane=172.30.5.15 | overcloud-full | compute |
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
## On Python3 env
sudo pip3 install PyYAML==5.1
## On Python2 env
sudo pip install PyYAML==5.1
## On Python3 env
python3 ./generate_nfs_map.py
## On Python2 env
python ./generate_nfs_map.py
grep ':.*:' triliovault_nfs_map_output.yml >> ../redhat-director-scripts/tripleo-train/environments/trilio_nfs_map.yaml
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/environments/trilio_nfs_map.yaml
openstack overcloud deploy --templates \
-e /home/stack/templates/node-info.yaml \
-e /home/stack/templates/overcloud_images.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/environments/trilio_env.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/enable-tls.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/inject-trust-anchor.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/environments/trilio_env_tls_endpoints_public_dns.yaml \
--ntp-server 192.168.1.34 \
--libvirt-type qemu \
--log-file overcloud_deploy.log \
-r /home/stack/templates/roles_data.yaml
[root@overcloud-controller-0 heat-admin]# podman ps | grep trilio
26fcb9194566 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-tripleo kolla_start 5 days ago Up 5 days ago trilio_dmapi
094971d0f5a9 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-tripleo kolla_start 5 days ago Up 5 days ago horizon
/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg
[root@overcloud-novacompute-0 heat-admin]# podman ps | grep trilio
b1840444cc59 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-tripleo kolla_start 5 days ago Up 5 days ago trilio_datamover
[root@overcloud-controller-0 heat-admin]# podman ps | grep horizon
094971d0f5a9 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-tripleo kolla_start 5 days ago Up 5 days ago horizon
openstack stack failures list overcloud
heat stack-list --show-nested -f "status=FAILED"
heat resource-list --nested-depth 5 overcloud | grep FAILED
##=> If trilio datamover api containers does not start well or in restarting state, use following logs to debug.
docker logs trilio_dmapi
tailf /var/log/containers/trilio-datamover-api/dmapi.log
##=> If trilio datamover containers does not start well or in restarting state, use following logs to debug.
docker logs trilio_datamover
tailf /var/log/containers/trilio-datamover/tvault-contego.logcurl -O https://raw.githubusercontent.com/trilioData/triliovault-cfg-scripts/master/common/nova_userid.sh
chmod +x nova_userid.sh
vi nova_userid.sh # change nova user_id and group_id to uid & gid present on compute nodes.
./nova_userid.sh
id nova
git clone -b <branch> https://github.com/trilioData/triliovault-cfg-scripts.git
cd triliovault-cfg-scripts/
cp -R ansible/roles/* /opt/openstack-ansible/playbooks/roles/
cp ansible/main-install.yml /opt/openstack-ansible/playbooks/os-tvault-install.yml
cp ansible/environments/group_vars/all/vars.yml /etc/openstack_deploy/user_tvault_vars.yml
cp ansible/tvault_pre_install.yml /opt/openstack-ansible/playbooks/
- import_playbook: os-tvault-install.yml
# Datamover haproxy setting
haproxy_extra_services:
- service:
haproxy_service_name: datamover_service
haproxy_backend_nodes: "{{ groups['dmapi_all'] | default([]) }}"
haproxy_ssl: "{{ haproxy_ssl }}"
haproxy_port: 8784
haproxy_balance_type: http
haproxy_balance_alg: roundrobin
haproxy_timeout_client: 10m
haproxy_timeout_server: 10m
haproxy_backend_options:
- "httpchk GET / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"cat > /opt/openstack-ansible/inventory/env.d/tvault-dmapi.yml
component_skel:
dmapi_api:
belongs_to:
- dmapi_all
container_skel:
dmapi_container:
belongs_to:
- tvault-dmapi_containers
contains:
- dmapi_api
physical_skel:
tvault-dmapi_containers:
belongs_to:
- all_containers
tvault-dmapi_hosts:
belongs_to:
- hosts
#tvault-dmapi
tvault-dmapi_hosts: # Add controller details in this section, as the Trilio dmapi service resides on the controller nodes.
infra-1: # controller host name
ip: 172.26.0.3 # IP address of the controller
infra-2: # If there are multiple controllers, add their details in the same manner as shown for infra-2
ip: 172.26.0.4
#tvault-datamover
tvault_compute_hosts: # Add compute details in this section, as the Trilio datamover resides on the compute nodes.
infra-1: # compute host name
ip: 172.26.0.7 # IP address of the compute node
infra-2: # If there are multiple compute nodes, add their details in the same manner as shown for infra-2
ip: 172.26.0.8
##common editable parameters required for installing tvault-horizon-plugin, tvault-contego and tvault-datamover-api
#ip address of TVM
IP_ADDRESS: sample_tvault_ip_address
##Time Zone
TIME_ZONE: "Etc/UTC"
## Don't update or modify the value of TVAULT_PACKAGE_VERSION
## The default value is '4.2.64'
TVAULT_PACKAGE_VERSION: 4.2.64
# Update Openstack dist code name like ussuri etc.
OPENSTACK_DIST: ussuri
#The following statement needs to be added to the nova sudoers file:
#nova ALL = (root) NOPASSWD: /home/tvault/.virtenv/bin/privsep-helper *
#These changes are required for the Datamover; otherwise the Datamover will not work.
#If you are sure, set the variable to
# UPDATE_NOVA_SUDOERS_FILE: proceed
#otherwise the ansible tvault-contego installation will exit
UPDATE_NOVA_SUDOERS_FILE: proceed
##### Select snapshot storage type #####
#Details for NFS as snapshot storage , NFS_SHARES should begin with "-".
##True/False
NFS: False
NFS_SHARES:
- sample_nfs_server_ip1:sample_share_path
- sample_nfs_server_ip2:sample_share_path
#if NFS_OPTS is empty then default value will be "nolock,soft,timeo=180,intr,lookupcache=none"
NFS_OPTS: ""
## Valid for 'nfs' backup target only.
## If the backup target NFS share supports multiple endpoints/IPs but is a single share in the backend, then
## set the 'multi_ip_nfs_enabled' parameter to 'True'. Otherwise its value should be 'False'.
multi_ip_nfs_enabled: False
#### Details for S3 as snapshot storage
##True/False
S3: False
VAULT_S3_ACCESS_KEY: sample_s3_access_key
VAULT_S3_SECRET_ACCESS_KEY: sample_s3_secret_access_key
VAULT_S3_REGION_NAME: sample_s3_region_name
VAULT_S3_BUCKET: sample_s3_bucket
VAULT_S3_SIGNATURE_VERSION: default
#### S3 Specific Backend Configurations
#### Provide one of the following two values in the s3_type variable; the string's case should match
#Amazon/Other_S3_Compatible
s3_type: sample_s3_type
#### Required field(s) for all S3 backends except Amazon
VAULT_S3_ENDPOINT_URL: ""
#True/False
VAULT_S3_SECURE: True
VAULT_S3_SSL_CERT: ""
###details of datamover API
##If SSL is enabled "DMAPI_ENABLED_SSL_APIS" value should be dmapi.
#DMAPI_ENABLED_SSL_APIS: dmapi
##If SSL is disabled "DMAPI_ENABLED_SSL_APIS" value should be empty.
DMAPI_ENABLED_SSL_APIS: ""
DMAPI_SSL_CERT: ""
DMAPI_SSL_KEY: ""
## Trilio dmapi_workers count
## Default value of dmapi_workers is 16
dmapi_workers: 16
#### Any service is using Ceph Backend then set ceph_backend_enabled value to True
#True/False
ceph_backend_enabled: False
## Provide Horizon Virtual Env path from Horizon_container
## e.g. '/openstack/venvs/horizon-23.1.0'
horizon_virtual_env: '/openstack/venvs/horizon*'
## When More Than One Nova Virtual Env. On Compute Node(s) and
## User Wants To Specify Specific Nova Virtual Env. From Existing
## Then Only Uncomment the var nova_virtual_env and pass value like 'openstack/venvs/nova-23.2.0'
#nova_virtual_env: 'openstack/venvs/nova-23.2.0'
#Set verbosity level and run playbooks with -vvv option to display custom debug messages
verbosity_level: 3
#******************************************************************************************************************************************************************
###static fields for tvault contego extension ,Please Do not Edit Below Variables
#******************************************************************************************************************************************************************
#SSL path
DMAPI_SSL_CERT_DIR: /opt/config-certs/dmapi
VAULT_S3_SSL_CERT_DIR: /opt/config-certs/s3
RABBITMQ_SSL_DIR: /opt/config-certs/rabbitmq
DMAPI_SSL_CERT_PATH: /opt/config-certs/dmapi/dmapi-ca.pem
DMAPI_SSL_KEY_PATH: /opt/config-certs/dmapi/dmapi.key
VAULT_S3_SSL_CERT_PATH: /opt/config-certs/s3/ca_cert.pem
RABBITMQ_SSL_CERT_PATH: /opt/config-certs/rabbitmq/rabbitmq.pem
RABBITMQ_SSL_KEY_PATH: /opt/config-certs/rabbitmq/rabbitmq.key
RABBITMQ_SSL_CA_CERT_PATH: /opt/config-certs/rabbitmq/rabbitmq-ca.pem
PORT_NO: 8085
PYPI_PORT: 8081
DMAPI_USR: dmapi
DMAPI_GRP: dmapi
#tvault contego file path
TVAULT_CONTEGO_CONF: /etc/tvault-contego/tvault-contego.conf
TVAULT_OBJECT_STORE_CONF: /etc/tvault-object-store/tvault-object-store.conf
NOVA_CONF_FILE: /etc/nova/nova.conf
#Nova distribution specific configuration file path
NOVA_DIST_CONF_FILE: /usr/share/nova/nova-dist.conf
TVAULT_CONTEGO_EXT_USER: nova
TVAULT_CONTEGO_EXT_GROUP: nova
TVAULT_DATA_DIR_MODE: 0775
TVAULT_DATA_DIR_OLD: /var/triliovault
TVAULT_DATA_DIR: /var/triliovault-mounts
TVAULT_CONTEGO_VIRTENV: /home/tvault
TVAULT_CONTEGO_VIRTENV_PATH: "{{TVAULT_CONTEGO_VIRTENV}}/.virtenv"
TVAULT_CONTEGO_EXT_BIN: "{{TVAULT_CONTEGO_VIRTENV_PATH}}/bin/tvault-contego"
TVAULT_CONTEGO_EXT_PYTHON: "{{TVAULT_CONTEGO_VIRTENV_PATH}}/bin/python"
TVAULT_CONTEGO_EXT_OBJECT_STORE: ""
TVAULT_CONTEGO_EXT_BACKEND_TYPE: ""
TVAULT_CONTEGO_EXT_S3: "{{TVAULT_CONTEGO_VIRTENV_PATH}}/lib/python2.7/site-packages/contego/nova/extension/driver/s3vaultfuse.py"
privsep_helper_file: /home/tvault/.virtenv/bin/privsep-helper
pip_version: 7.1.2
virsh_version: "1.2.8"
contego_service_file_path: /etc/systemd/system/tvault-contego.service
contego_service_ulimits_count: 65536
contego_service_debian_path: /etc/init/tvault-contego.conf
objstore_service_file_path: /etc/systemd/system/tvault-object-store.service
objstore_service_debian_path: /etc/init/tvault-object-store.conf
ubuntu: "Ubuntu"
centos: "CentOS"
redhat: "RedHat"
Amazon: "Amazon"
Other_S3_Compatible: "Other_S3_Compatible"
tvault_datamover_api: tvault-datamover-api
datamover_service_file_path: /etc/systemd/system/tvault-datamover-api.service
datamover_service_debian_path: /etc/init/tvault-datamover.conf
datamover_log_dir: /var/log/dmapi
trilio_yum_repo_file_path: /etc/yum.repos.d/trilio.repo
verbosity_level: 3
## Valid for 'nfs' backup target only.
## If the backup target NFS share supports multiple endpoints/IPs but is a single share in the backend, then
## set the 'multi_ip_nfs_enabled' parameter to 'True'. Otherwise its value should be 'False'.
multi_ip_nfs_enabled: False
cd triliovault-cfg-scripts/common/
vi triliovault_nfs_map_input.yml
pip3 install -U pyyaml
python ./generate_nfs_map.py
vi triliovault_nfs_map_output.yml
cat triliovault_nfs_map_output.yml >> /etc/openstack_deploy/user_tvault_vars.yml
cd /opt/openstack-ansible/playbooks
## Run tvault_pre_install.yml to install lxc packages
ansible-playbook tvault_pre_install.yml
# To create Dmapi container
openstack-ansible lxc-containers-create.yml
#To Deploy Trilio Components
openstack-ansible os-tvault-install.yml
#To configure Haproxy for Dmapi
openstack-ansible haproxy-install.yml
openstack-ansible setup-infrastructure.yml --syntax-check
openstack-ansible setup-hosts.yml
openstack-ansible setup-infrastructure.yml
openstack-ansible setup-openstack.yml
lxc-ls # Check the dmapi container is present on the controller node.
lxc-info -s controller_dmapi_container-a11984bf # Confirm running status of the container
systemctl status tvault-contego.service
systemctl status tvault-object-store # If Storage backend is S3
df -h # Verify the mount point is mounted on compute node(s)
lxc-attach -n controller_horizon_container-1d9c055c # To log in to the horizon container
apt list | egrep 'tvault-horizon-plugin|workloadmgrclient|contegoclient' # For ubuntu based container
dnf list installed | egrep 'tvault-horizon-plugin|workloadmgrclient|contegoclient' # For CentOS based container
haproxy -c -V -f /etc/haproxy/haproxy.cfg # Verify the keyword datamover_service-back is present in the output.
workloadmgr workload-list [--all {True,False}] [--nfsshare <nfsshare>]
workloadmgr workload-create --instance <instance-id=instance-uuid>
[--display-name <display-name>]
[--display-description <display-description>]
[--workload-type-id <workload-type-id>]
[--source-platform <source-platform>]
[--jobschedule <key=key-name>]
[--metadata <key=key-name>]
[--policy-id <policy_id>]
[--encryption <True/False>]
[--secret-uuid <secret_uuid>]
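For example, a minimal invocation that protects a single instance (a sketch; the UUID is a placeholder):
workloadmgr workload-create --instance instance-id=<instance_uuid> --display-name my-workload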
workloadmgr workload-show <workload_id> [--verbose <verbose>]
usage: workloadmgr workload-modify [--display-name <display-name>]
[--display-description <display-description>]
[--instance <instance-id=instance-uuid>]
[--jobschedule <key=key-name>]
[--metadata <key=key-name>]
[--policy-id <policy_id>]
<workload_id>
workloadmgr workload-delete [--database_only <True/False>] <workload_id>
workloadmgr workload-unlock <workload_id>
workloadmgr workload-reset <workload_id>
tls-endpoints-public-ip.yaml
tls-everywhere-endpoints-dns.yaml
cd /home/stack
mv triliovault-cfg-scripts triliovault-cfg-scripts-old
#Additionally keep the NFS share path noted
#/var/lib/nova/triliovault-mounts/MTcyLjMwLjEuMzovcmhvc3BuZnM=
##Clone latest Trilio cfg scripts repository
git clone --branch 4.3.2 https://github.com/trilioData/triliovault-cfg-scripts.git
cd triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/
cp s3-cert.pem /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/puppet/trilio/files/
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/scripts/
chmod +x *.sh
./upload_puppet_module.sh
## The output of the above command looks like the following.
Creating tarball...
Tarball created.
Creating heat environment file: /home/stack/.tripleo/environments/puppet-modules-url.yaml
Uploading file to swift: /tmp/puppet-modules-8Qjya2X/puppet-modules.tar.gz
+-----------------------+---------------------+----------------------------------+
| object | container | etag |
+-----------------------+---------------------+----------------------------------+
| puppet-modules.tar.gz | overcloud-artifacts | 368951f6a4d39cfe53b5781797b133ad |
+-----------------------+---------------------+----------------------------------+
## The above command creates the following file.
ls -ll /home/stack/.tripleo/environments/puppet-modules-url.yaml
Trilio Datamover container: docker.io/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION>-tripleo
Trilio Datamover Api Container: docker.io/trilio/tripleo-train-centos7-trilio-datamover-api:<HOTFIX-TAG-VERSION>-tripleo
Trilio horizon plugin: docker.io/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-tripleo
# For TripleO Train Centos7
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: docker.io/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION>-tripleo
DockerTrilioDmApiImage: docker.io/trilio/tripleo-train-centos7-trilio-datamover-api:<HOTFIX-TAG-VERSION>-tripleo
DockerHorizonImage: docker.io/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-tripleo
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/scripts/
sudo ./prepare_trilio_images.sh <undercloud_registry_hostname_or_ip> <OS_platform> <4.3-TRIPLEO-CONTAINER> <container_tool_available_on_undercloud>
## To get undercloud registry hostname/ip, we have two approaches. Use either one.
1. openstack tripleo container image list
2. find your 'containers-prepare-parameter.yaml' (from overcloud deploy command) and search for 'push_destination'
cat /home/stack/containers-prepare-parameter.yaml | grep push_destination
- push_destination: "undercloud.ctlplane.ooo.prod1:8787"
Here, 'undercloud.ctlplane.ooo.prod1' is the undercloud registry hostname. Use it in the command as shown in the following example.
# Command Example:
sudo ./prepare_trilio_images.sh undercloud.ctlplane.ooo.prod1 centos7 <HOTFIX-TAG-VERSION>-tripleo podman
## Verify changes
# For TripleO Train Centos7
$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: prod1-undercloud.demo:8787/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION>-tripleo
DockerTrilioDmApiImage: prod1-undercloud.demo:8787/trilio/tripleo-train-centos7-trilio-datamover-api:<HOTFIX-TAG-VERSION>-tripleo
DockerHorizonImage: prod1-undercloud.demo:8787/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-tripleo
## For Centos7 Train
(undercloud) [stack@undercloud redhat-director-scripts/tripleo-train]$ openstack tripleo container image list | grep trilio
| docker://undercloud.ctlplane.localdomain:8787/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION>-tripleo | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/tripleo-train-centos7-trilio-datamover-api:<HOTFIX-TAG-VERSION>-tripleo | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-tripleo |
cd triliovault-cfg-scripts/common/
(undercloud) [stack@ucqa161 ~]$ openstack server list
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
| ID | Name | Status | Networks | Image | Flavor |
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
| 8c3d04ae-fcdd-431c-afa6-9a50f3cb2c0d | overcloudtrain1-controller-2 | ACTIVE | ctlplane=172.30.5.18 | overcloud-full | control |
| 103dfd3e-d073-4123-9223-b8cf8c7398fe | overcloudtrain1-controller-0 | ACTIVE | ctlplane=172.30.5.11 | overcloud-full | control |
| a3541849-2e9b-4aa0-9fa9-91e7d24f0149 | overcloudtrain1-controller-1 | ACTIVE | ctlplane=172.30.5.25 | overcloud-full | control |
| 74a9f530-0c7b-49c4-9a1f-87e7eeda91c0 | overcloudtrain1-novacompute-0 | ACTIVE | ctlplane=172.30.5.30 | overcloud-full | compute |
| c1664ac3-7d9c-4a36-b375-0e4ee19e93e4 | overcloudtrain1-novacompute-1 | ACTIVE | ctlplane=172.30.5.15 | overcloud-full | compute |
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
## On Python3 env
sudo pip3 install PyYAML==5.1
## On Python2 env
sudo pip install PyYAML==5.1
## On Python3 env
python3 ./generate_nfs_map.py
## On Python2 env
python ./generate_nfs_map.py
grep ':.*:' triliovault_nfs_map_output.yml >> ../redhat-director-scripts/tripleo-train/environments/trilio_nfs_map.yaml
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/environments/trilio_nfs_map.yaml
openstack overcloud deploy --templates \
-e /home/stack/templates/node-info.yaml \
-e /home/stack/templates/overcloud_images.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/environments/trilio_env.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/enable-tls.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/inject-trust-anchor.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/environments/trilio_env_tls_endpoints_public_dns.yaml \
--ntp-server 192.168.1.34 \
--libvirt-type qemu \
--log-file overcloud_deploy.log \
-r /home/stack/templates/roles_data.yaml
[root@overcloud-novacompute-0 heat-admin]# podman ps | grep trilio
b1840444cc59 prod1-compute1.demo:8787/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION>-tripleo kolla_start 5 days ago Up 5 days ago trilio_datamover
[root@overcloud-controller-0 heat-admin]# podman ps | grep horizon
094971d0f5a9 prod1-controller1.demo:8787/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-tripleo kolla_start 5 days ago Up 5 days ago horizon
openstack stack failures list overcloud
heat stack-list --show-nested -f "status=FAILED"
heat resource-list --nested-depth 5 overcloud | grep FAILED
=> If the Trilio Datamover API container does not start or keeps restarting, use the following logs to debug.
docker logs trilio_dmapi
tailf /var/log/containers/trilio-datamover-api/dmapi.log
=> If the Trilio Datamover container does not start or keeps restarting, use the following logs to debug.
docker logs trilio_datamover
tailf /var/log/containers/trilio-datamover/tvault-contego.log
i. Before proceeding for upgrade OR reinitialize, fetch the list of ALL workload IDs which are NOT in error OR deleted state from the database
Query : select id from workloads where status not in ('deleted','error')
ii. Use IDs from this list to create the import CLI command parameters.
Sample : --workloadids <wl_id1> --workloadids <wl_id2> …. etc. The shell command below generates this string.
wlIdList.txt should contain all workload IDs, one ID per line.
iii. awk '{print " --workloadids "$1}' wlIdList.txt | tr -d '\n'
iv. Append the output of the above command to the import command.
workloadmgr workload-importworkloads <Command output>
Eg : workloadmgr workload-importworkloads --workloadids ff24945f-7bef-498d-98eb-d727ec85bc7b --workloadids a15948b4-942c-47e2-85c5-06cad697010f
RHOSP/TripleO: /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
Kolla Ansible with CentOS: /etc/pki/tls/certs/ca-bundle.crt
Kolla Ansible with Ubuntu: /usr/local/share/ca-certificates/
OpenStack Ansible (OSA) with Ubuntu in our lab: /etc/openstack_deploy/ssl/
OpenStack Ansible (OSA) with CentOS: /etc/openstack_deploy/ssl/
/etc/workloadmgr/ca-chain.pem
create database workloadmgr_auto;
CREATE USER 'trilio'@'localhost' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON workloadmgr_auto.* TO 'trilio'@'10.10.10.67' IDENTIFIED BY 'password';
mysql://trilio:[email protected]/workloadmgr_auto?charset=utf8
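The new credentials can be verified from the Trilio appliance before they are placed in the configuration (a quick check; it assumes the database host and password from the example above):
mysql -h 10.10.10.67 -u trilio -p workloadmgr_auto -e "SELECT 1;"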
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 11:52:56 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-99f51825-9b47-41ea-814f-8f8141157fc7
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 12:06:01 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-4eb1863e-3afa-4a2c-b8e6-91a41fe37f78
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 12:31:49 GMT
Content-Type: application/json
Content-Length: 1223
Connection: keep-alive
X-Compute-Request-Id: req-c6f826a9-fff7-442b-8886-0770bb97c491
{
"scheduler_enabled":true,
"trust":{
"created_at":"2020-10-23T14:35:11.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"trust-002bcbaf-c16b-44e6-a9ef-9c1efbfa2e2c",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"value":"871ca24f38454b14b867338cb0e9b46c",
"description":"token id for user ccddc7e7a015487fa02920f4d4979779 project c76b3355a164498aa95ddbc960adc238",
"category":"identity",
"type":"trust_id",
"public":false,
"hidden":true,
"status":"available",
"metadata":[
{
"created_at":"2020-10-23T14:35:11.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"a3cc9a01-3d49-4ff8-ad8e-b12a7b3c68b0",
"settings_name":"trust-002bcbaf-c16b-44e6-a9ef-9c1efbfa2e2c",
"settings_project_id":"c76b3355a164498aa95ddbc960adc238",
"key":"role_name",
"value":"member"
}
]
},
"is_valid":true,
"scheduler_obj":{
"workload_id":"4bafaa03-f69a-45d5-a6fc-ae0119c77974",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"user_domain_id":"default",
"user":"ccddc7e7a015487fa02920f4d4979779",
"tenant":"c76b3355a164498aa95ddbc960adc238"
}
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 12:45:27 GMT
Content-Type: application/json
Content-Length: 30
Connection: keep-alive
X-Compute-Request-Id: req-cd447ce0-7bd3-4a60-aa92-35fc43b4729b
{"global_job_scheduler": true}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 12:49:29 GMT
Content-Type: application/json
Content-Length: 31
Connection: keep-alive
X-Compute-Request-Id: req-6f49179a-737a-48ab-91b7-7e7c460f5af0
{"global_job_scheduler": false}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 12:50:11 GMT
Content-Type: application/json
Content-Length: 30
Connection: keep-alive
X-Compute-Request-Id: req-ed279acc-9805-4443-af91-44a4420559bc
{"global_job_scheduler": true}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 04 Feb 2021 11:55:43 GMT
Content-Type: application/json
Content-Length: 403
Connection: keep-alive
X-Compute-Request-Id: req-ac16c258-7890-4ae7-b7f4-015b5aa4eb99
{
"settings":[
{
"created_at":"2021-02-04T11:55:43.890855",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"smtp_port",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":null,
"value":"8080",
"description":null,
"category":null,
"type":"email_settings",
"public":false,
"hidden":0,
"status":"available",
"is_public":false,
"is_hidden":false
}
]
}
{
"settings":[
{
"category":null,
"name":<String Setting_name>,
"is_public":false,
"is_hidden":false,
"metadata":{
},
"type":<String Setting type>,
"value":<String Setting Value>,
"description":null
}
]
}
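A populated request body, using the smtp_port setting from the surrounding examples (an illustration of the template above, not an exhaustive list of accepted fields):
{
   "settings":[
      {
         "category":null,
         "name":"smtp_port",
         "is_public":false,
         "is_hidden":false,
         "metadata":{
         },
         "type":"email_settings",
         "value":"8080",
         "description":null
      }
   ]
}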
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 04 Feb 2021 12:01:27 GMT
Content-Type: application/json
Content-Length: 380
Connection: keep-alive
X-Compute-Request-Id: req-404f2808-7276-4c2b-8870-8368a048c28c
{
"setting":{
"created_at":"2021-02-04T11:55:43.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"smtp_port",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":null,
"value":"8080",
"description":null,
"category":null,
"type":"email_settings",
"public":false,
"hidden":false,
"status":"available",
"metadata":[
]
}
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 04 Feb 2021 12:05:59 GMT
Content-Type: application/json
Content-Length: 403
Connection: keep-alive
X-Compute-Request-Id: req-e92e2c38-b43a-4046-984e-64cea3a0281f
{
"settings":[
{
"created_at":"2021-02-04T11:55:43.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"smtp_port",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":null,
"value":"8080",
"description":null,
"category":null,
"type":"email_settings",
"public":false,
"hidden":0,
"status":"available",
"is_public":false,
"is_hidden":false
}
]
}
{
"settings":[
{
"category":null,
"name":<String Setting_name>,
"is_public":false,
"is_hidden":false,
"metadata":{
},
"type":<String Setting type>,
"value":<String Setting Value>,
"description":null
}
]
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 04 Feb 2021 11:49:17 GMT
Content-Type: application/json
Content-Length: 1223
Connection: keep-alive
X-Compute-Request-Id: req-5a8303aa-6c90-4cd9-9b6a-8c200f9c2473
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 21 Jan 2021 11:21:57 GMT
Content-Type: application/json
Content-Length: 868
Connection: keep-alive
X-Compute-Request-Id: req-fa48f0ad-aa76-42fa-85ea-1e5461889fb3
{
"trust":[
{
"created_at":"2020-11-26T13:10:53.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
git clone -b 4.3.2 https://github.com/trilioData/triliovault-cfg-scripts.git
cd triliovault-cfg-scripts/kolla-ansible/
# For Centos and Ubuntu
cp -R ansible/roles/triliovault /usr/local/share/kolla-ansible/ansible/roles/
## For Centos and Ubuntu
- Take a backup of globals.yml
cp /etc/kolla/globals.yml /opt/
- If the OpenStack release is other than 'zed', append the Trilio global variables below to globals.yml
cat ansible/triliovault_globals.yml >> /etc/kolla/globals.yml
- If the OpenStack release is 'zed', append the Trilio global variables below to globals.yml
cat ansible/triliovault_globals_zed.yml >> /etc/kolla/globals.yml
## For Centos and Ubuntu
- Take a backup of passwords.yml
cp /etc/kolla/passwords.yml /opt/
- Append the Trilio global variables to passwords.yml
cat ansible/triliovault_passwords.yml >> /etc/kolla/passwords.yml
- Edit '/etc/kolla/passwords.yml', go to the end of the file, and set the Trilio passwords.
# For Centos and Ubuntu
- Take backup of site.yml
cp /usr/local/share/kolla-ansible/ansible/site.yml /opt/
# If the OpenStack release is ‘yoga' append below Trilio code to site.yml
cat ansible/triliovault_site_yoga.yml >> /usr/local/share/kolla-ansible/ansible/site.yml
# If the OpenStack release is other than 'yoga' append below Trilio code to site.yml
cat ansible/triliovault_site.yml >> /usr/local/share/kolla-ansible/ansible/site.yml For example:
If your inventory file name path '/root/multinode' then use following command.
cat ansible/triliovault_inventory.txt >> /root/multinode

cd triliovault-cfg-scripts/common/

pip3 install -U pyyaml

python ./generate_nfs_map.py

cat triliovault_nfs_map_output.yml >> ../kolla-ansible/ansible/triliovault_globals.yml
1. docker.io/trilio/kolla-{{ kolla_base_distro }}-trilio-datamover:{{ triliovault_tag }}
2. docker.io/trilio/kolla-{{ kolla_base_distro }}-trilio-datamover-api:{{ triliovault_tag }}
3. docker.io/trilio/kolla-{{ kolla_base_distro }}-trilio-horizon-plugin:{{ triliovault_tag }}
## EXAMPLE from Kolla Ubuntu source based OpenStack
docker.io/trilio/kolla-ubuntu-trilio-datamover:{{ triliovault_tag }}
docker.io/trilio/kolla-ubuntu-trilio-datamover-api:{{ triliovault_tag }}
docker.io/trilio/kolla-ubuntu-trilio-horizon-plugin:{{ triliovault_tag }}

1. docker.io/trilio/kolla-{{ kolla_base_distro }}-trilio-datamover:{{ triliovault_tag }}
2. docker.io/trilio/kolla-{{ kolla_base_distro }}-trilio-datamover-api:{{ triliovault_tag }}
3. docker.io/trilio/{{ kolla_base_distro }}-binary-trilio-horizon-plugin:{{ triliovault_tag }}
## EXAMPLE from Kolla Ubuntu binary based OpenStack
docker.io/trilio/kolla-ubuntu-trilio-datamover:{{ triliovault_tag }}
docker.io/trilio/kolla-ubuntu-trilio-datamover-api:{{ triliovault_tag }}
docker.io/trilio/ubuntu-binary-trilio-horizon-plugin:{{ triliovault_tag }}

nova_libvirt_default_volumes:
- "{{ node_config_directory }}/nova-libvirt/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "/run/:/run/:shared"
- "/dev:/dev"
- "/sys/fs/cgroup:/sys/fs/cgroup"
- "kolla_logs:/var/log/kolla/"
- "libvirtd:/var/lib/libvirt"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "
{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}
"
- "nova_libvirt_qemu:/etc/libvirt/qemu"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }
- "/var/trilio:/var/trilio:shared"nova_compute_default_volumes:
- "{{ node_config_directory }}/nova-compute/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "/run:/run:shared"
- "/dev:/dev"
- "kolla_logs:/var/log/kolla/"
- "
{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
- "libvirtd:/var/lib/libvirt"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}
"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
- "/var/trilio:/var/trilio:shared"nova_compute_ironic_default_volumes:
- "{{ node_config_directory }}/nova-compute-ironic/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
- "/var/trilio:/var/trilio:shared"ansible -i multinode control -m shell -a "docker login -u <docker-login-username> -p <docker-login-password> docker.io"kolla-ansible -i multinode pull --tags triliovaultkolla-ansible -i multinode deploy[controller] docker ps | grep "trilio-"
a2a3593f76db trilio/kolla-centos-trilio-datamover-api:<triliovault_tag> "dumb-init --single-…" 23 hours ago Up 23 hours triliovault_datamover_api
5f573caa7b02 trilio/kolla-centos-trilio-horizon-plugin:<triliovault_tag> "dumb-init --single-…" 23 hours ago Up 23 hours horizon
[compute] docker ps | grep "trilio-"
f6d443c2942c trilio/kolla-centos-trilio-datamover:<triliovault_tag> "dumb-init --single-…" 23 hours ago Up 23 hours triliovault_datamover

docker ps -a | grep trilio

docker logs trilio_datamover_api
docker logs trilio_datamover

docker ps | grep horizon

/var/log/kolla/triliovault-datamover-api/dmapi.log

/var/log/kolla/triliovault-datamover/tvault-contego.log

## Download the shell script
$ curl -O https://raw.githubusercontent.com/trilioData/triliovault-cfg-scripts/master/common/nova_userid.sh
## Assign executable permissions
$ chmod +x nova_userid.sh
## Execute the shell script to change 'nova' user and group id to '42436'
$ ./nova_userid.sh
## Ignore any errors and verify that 'nova' user and group id has changed to '42436'
$ id nova
uid=42436(nova) gid=42436(nova) groups=42436(nova),990(libvirt),36(kvm)

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 21 Jan 2021 11:43:36 GMT
Content-Type: application/json
Content-Length: 868
Connection: keep-alive
X-Compute-Request-Id: req-2151b327-ea74-4eec-b606-f0df358bc2a0
{
"trust":[
{
"created_at":"2021-01-21T11:43:36.140407",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"trust-b03daf38-1615-48d6-88f9-a807c728e786",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"value":"1c981a15e7a54242ae54eee6f8d32e6a",
"description":"token id for user adfa32d7746a4341b27377d6f7c61adb project 4dfe98a43bfa404785a812020066b4d6",
"category":"identity",
"type":"trust_id",
"public":false,
"hidden":1,
"status":"available",
"is_public":false,
"is_hidden":true,
"metadata":[
]
}
]
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 21 Jan 2021 11:39:12 GMT
Content-Type: application/json
Content-Length: 888
Connection: keep-alive
X-Compute-Request-Id: req-3c2f6acb-9973-4805-bae3-cd8dbcdc2cb4
{
"trust":{
"created_at":"2020-11-26T13:15:29.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"trust-54e24d8d-6bcf-449e-8021-708b4ebc65e1",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"value":"703dfabb4c5942f7a1960736dd84f4d4",
"description":"token id for user adfa32d7746a4341b27377d6f7c61adb project 4dfe98a43bfa404785a812020066b4d6",
"category":"identity",
"type":"trust_id",
"public":false,
"hidden":true,
"status":"available",
"metadata":[
{
"created_at":"2020-11-26T13:15:29.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"86aceea1-9121-43f9-b55c-f862052374ab",
"settings_name":"trust-54e24d8d-6bcf-449e-8021-708b4ebc65e1",
"settings_project_id":"4dfe98a43bfa404785a812020066b4d6",
"key":"role_name",
"value":"member"
}
]
}
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 21 Jan 2021 11:41:51 GMT
Content-Type: application/json
Content-Length: 888
Connection: keep-alive
X-Compute-Request-Id: req-d838a475-f4d3-44e9-8807-81a9c32ea2a8

{
"scheduler_enabled":true,
"trust":{
"created_at":"2021-01-21T11:43:36.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"trust-b03daf38-1615-48d6-88f9-a807c728e786",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"value":"1c981a15e7a54242ae54eee6f8d32e6a",
"description":"token id for user adfa32d7746a4341b27377d6f7c61adb project 4dfe98a43bfa404785a812020066b4d6",
"category":"identity",
"type":"trust_id",
"public":false,
"hidden":true,
"status":"available",
"metadata":[
{
"created_at":"2021-01-21T11:43:36.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"d98d283a-b096-4a68-826a-36f99781787d",
"settings_name":"trust-b03daf38-1615-48d6-88f9-a807c728e786",
"settings_project_id":"4dfe98a43bfa404785a812020066b4d6",
"key":"role_name",
"value":"member"
}
]
},
"is_valid":true,
"scheduler_obj":{
"workload_id":"209c13fa-e743-4ccd-81f7-efdaff277a1f",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_domain_id":"default",
"user":"adfa32d7746a4341b27377d6f7c61adb",
"tenant":"4dfe98a43bfa404785a812020066b4d6"
}
}

{
"trusts":{
"role_name":"member",
"is_cloud_trust":false
}
}

workloadmgr setting-create [--description <description>]
[--category <category>]
[--type <type>]
[--is-public {True,False}]
[--is-hidden {True,False}]
[--metadata <key=value>]
<name> <value>

workloadmgr setting-update [--description <description>]
[--category <category>]
[--type <type>]
[--is-public {True,False}]
[--is-hidden {True,False}]
[--metadata <key=value>]
<name> <value>

workloadmgr setting-show [--get_hidden {True,False}] <setting_name>

workloadmgr setting-delete <setting_name>

workloadmgr get-global-job-scheduler

workloadmgr disable-global-job-scheduler

workloadmgr enable-global-job-scheduler
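As an illustration of the synopses above, the 'smtp_port' setting from the API examples earlier could be created, inspected and removed as follows (a sketch only; the values are illustrative):

workloadmgr setting-create --type email_settings --is-public False --is-hidden False smtp_port 8080
workloadmgr setting-show smtp_port
workloadmgr setting-delete smtp_port

[root@TVM2 ~]# pcs resource disable wlm-cron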
[root@TVM2 ~]# systemctl status wlm-cron
● wlm-cron.service - workload's scheduler cron service
Loaded: loaded (/etc/systemd/system/wlm-cron.service; disabled; vendor preset: disabled)
Active: inactive (dead)
Jun 11 08:27:06 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:06 - INFO - 1...t
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 140686268624368 Child 11389 ki...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - 1...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Shutting down thread pool
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...l
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Stopping the threads
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...s
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: All threads are stopped succes...y
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - A...y
Jun 11 08:27:09 TVM2 systemd[1]: Stopped workload's scheduler cron service.
Hint: Some lines were ellipsized, use -l to show in full.
[root@TVM2 ~]# pcs resource show wlm-cron
Resource: wlm-cron (class=systemd type=wlm-cron)
Meta Attrs: target-role=Stopped
Operations: monitor interval=30s on-fail=restart timeout=300s (wlm-cron-monitor-interval-30s)
            start interval=0s on-fail=restart timeout=300s (wlm-cron-start-interval-0s)
stop interval=0s timeout=300s (wlm-cron-stop-interval-0s)
[root@TVM2 ~]# ps -ef | grep -i workloadmgr-cron
root 15379 14383 0 08:27 pts/0 00:00:00 grep --color=auto -i workloadmgr-cron
cd /home/stack
mv triliovault-cfg-scripts triliovault-cfg-scripts-old
git clone -b 4.3.2 https://github.com/trilioData/triliovault-cfg-scripts.git
cd triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/

cp s3-cert.pem /home/stack/triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/puppet/trilio/files/

cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/scripts/
./upload_puppet_module.sh
## Output of the above command looks like the following for RHOSP13, RHOSP16.1 and RHOSP16.2
Creating tarball...
Tarball created.
Creating heat environment file: /home/stack/.tripleo/environments/puppet-modules-url.yaml
Uploading file to swift: /tmp/puppet-modules-8Qjya2X/puppet-modules.tar.gz
+-----------------------+---------------------+----------------------------------+
| object | container | etag |
+-----------------------+---------------------+----------------------------------+
| puppet-modules.tar.gz | overcloud-artifacts | 368951f6a4d39cfe53b5781797b133ad |
+-----------------------+---------------------+----------------------------------+
## Output of the above command looks like the following for RHOSP17.0
Creating tarball...
Tarball created.
renamed '/tmp/puppet-modules-P3duCg9/puppet-modules.tar.gz' -> '/var/lib/tripleo/artifacts/overcloud-artifacts/puppet-modules.tar.gz'
Creating heat environment file: /home/stack/.tripleo/environments/puppet-modules-url.yaml
## The above command creates the following file.
ls -ll /home/stack/.tripleo/environments/puppet-modules-url.yaml
Trilio Datamover container: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
Trilio Datamover Api Container: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
Trilio horizon plugin: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13

Trilio Datamover container: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
Trilio Datamover Api Container: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
Trilio horizon plugin: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1

Trilio Datamover container: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
Trilio Datamover Api Container: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
Trilio horizon plugin: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2

Trilio Datamover container: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp17.0
Trilio Datamover Api Container: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp17.0
Trilio horizon plugin: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp17.0

# For RHOSP13
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
DockerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13
# For RHOSP16.1
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
# For RHOSP16.2
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
ContainerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2
# For RHOSP17.0
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp17.0
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp17.0
ContainerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp17.0
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/scripts/
./prepare_trilio_images.sh <undercloud_ip> <container_tag>
# Example:
./prepare_trilio_images.sh 192.168.13.34 <HOTFIX-TAG-VERSION>-rhosp13
## Verify changes
# For RHOSP13
$ grep '<HOTFIX-TAG-VERSION>-rhosp13' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: 172.25.2.2:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
DockerTrilioDmApiImage: 172.25.2.2:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
DockerHorizonImage: 172.25.2.2:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13

cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/scripts/
sudo ./prepare_trilio_images.sh <UNDERCLOUD_REGISTRY_HOSTNAME> <CONTAINER_TAG>
## Run the following command to find 'UNDERCLOUD_REGISTRY_HOSTNAME'.
-- In the below example 'trilio-undercloud.ctlplane.localdomain' is <UNDERCLOUD_REGISTRY_HOSTNAME>
$ openstack tripleo container image list | grep keystone
| docker://trilio-undercloud.ctlplane.localdomain:8787/rhosp-rhel8/openstack-keystone:16.0-82 |
| docker://trilio-undercloud.ctlplane.localdomain:8787/rhosp-rhel8/openstack-barbican-keystone-listener:16.0-84
## 'CONTAINER_TAG' format for RHOSP16.1: <HOTFIX-TAG-VERSION>-rhosp16.1
## 'CONTAINER_TAG' format for RHOSP16.2: <HOTFIX-TAG-VERSION>-rhosp16.2
## 'CONTAINER_TAG' format for RHOSP17.0: <HOTFIX-TAG-VERSION>-rhosp17.0
## Example
sudo ./prepare_trilio_images.sh trilio-undercloud.ctlplane.localdomain <HOTFIX-TAG-VERSION>-rhosp16.1

(undercloud) [stack@undercloud redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>]$ openstack tripleo container image list | grep trilio
| docker://undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1 | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1 | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1 |
-----------------------------------------------------------------------------------------------------
(undercloud) [stack@undercloud redhat-director-scripts]$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1

$ grep '<HOTFIX-TAG-VERSION>-rhosp13' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
DockerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13

## For RHOSP16.1
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
## For RHOSP16.2
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
ContainerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2
## For RHOSP17.0
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp17.0
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp17.0
ContainerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp17.0
RHOSP13: /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp13/environments/trilio_env.yaml
RHOSP16.1: /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/environments/trilio_env.yaml
RHOSP16.2: /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.2/environments/trilio_env.yaml
RHOSP17.0: /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp17.0/environments/trilio_env.yaml

cd triliovault-cfg-scripts/common/

(undercloud) [stack@ucqa161 ~]$ openstack server list
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
| ID | Name | Status | Networks | Image | Flavor |
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
| 8c3d04ae-fcdd-431c-afa6-9a50f3cb2c0d | overcloudtrain1-controller-2 | ACTIVE | ctlplane=172.30.5.18 | overcloud-full | control |
| 103dfd3e-d073-4123-9223-b8cf8c7398fe | overcloudtrain1-controller-0 | ACTIVE | ctlplane=172.30.5.11 | overcloud-full | control |
| a3541849-2e9b-4aa0-9fa9-91e7d24f0149 | overcloudtrain1-controller-1 | ACTIVE | ctlplane=172.30.5.25 | overcloud-full | control |
| 74a9f530-0c7b-49c4-9a1f-87e7eeda91c0 | overcloudtrain1-novacompute-0 | ACTIVE | ctlplane=172.30.5.30 | overcloud-full | compute |
| c1664ac3-7d9c-4a36-b375-0e4ee19e93e4 | overcloudtrain1-novacompute-1 | ACTIVE | ctlplane=172.30.5.15 | overcloud-full | compute |
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+

## On Python3 env
sudo pip3 install PyYAML==5.1
## On Python2 env
sudo pip install PyYAML==5.1

## On Python3 env
python3 ./generate_nfs_map.py
## On Python2 env
python ./generate_nfs_map.py

grep ':.*:' triliovault_nfs_map_output.yml >> ../redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/environments/trilio_nfs_map.yaml

-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/environments/trilio_nfs_map.yaml

openstack overcloud deploy --templates \
-e /home/stack/templates/node-info.yaml \
-e /home/stack/templates/overcloud_images.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/environments/trilio_env.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/enable-tls.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/inject-trust-anchor.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/environments/trilio_env_tls_endpoints_public_dns.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/environments/trilio_nfs_map.yaml \
--ntp-server 192.168.1.34 \
--libvirt-type qemu \
--log-file overcloud_deploy.log \
-r /home/stack/templates/roles_data.yaml

[root@overcloud-controller-0 heat-admin]# podman ps | grep trilio
26fcb9194566 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2 kolla_start 5 days ago Up 5 days ago trilio_dmapi
094971d0f5a9 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2 kolla_start 5 days ago Up 5 days ago horizon

[root@overcloud-novacompute-0 heat-admin]# podman ps | grep trilio
b1840444cc59 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2 kolla_start 5 days ago Up 5 days ago trilio_datamover

[root@overcloud-controller-0 heat-admin]# podman ps | grep horizon
094971d0f5a9 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2 kolla_start 5 days ago Up 5 days ago horizon

## Either of the below workarounds should be performed on all controller nodes where the issue occurs for the horizon pod.
Option 1: Restart the memcached service on the controller using systemctl (command: systemctl restart tripleo_memcached.service)
Option 2: Restart the memcached pod (command: podman restart memcached)

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 15:40:56 GMT
Content-Type: application/json
Content-Length: 1625
Connection: keep-alive
X-Compute-Request-Id: req-2ad95c02-54c6-4908-887b-c16c5e2f20fe
{
"quota_types":[
{
"created_at":"2020-10-19T10:05:52.000000",
"updated_at":"2020-10-19T10:07:32.000000",
"deleted_at":null,

[root@TVM2 ~]# pcs resource disable wlm-cron
[root@TVM2 ~]# systemctl status wlm-cron
● wlm-cron.service - workload's scheduler cron service
Loaded: loaded (/etc/systemd/system/wlm-cron.service; disabled; vendor preset: disabled)
Active: inactive (dead)
Jun 11 08:27:06 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:06 - INFO - 1...t
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 140686268624368 Child 11389 ki...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - 1...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Shutting down thread pool
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...l
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Stopping the threads
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...s
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: All threads are stopped succes...y
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - A...y
Jun 11 08:27:09 TVM2 systemd[1]: Stopped workload's scheduler cron service.
Hint: Some lines were ellipsized, use -l to show in full.
[root@TVM2 ~]# pcs resource show wlm-cron
Resource: wlm-cron (class=systemd type=wlm-cron)
Meta Attrs: target-role=Stopped
Operations: monitor interval=30s on-fail=restart timeout=300s (wlm-cron-monitor-interval-30s)
            start interval=0s on-fail=restart timeout=300s (wlm-cron-start-interval-0s)
stop interval=0s timeout=300s (wlm-cron-stop-interval-0s)
[root@TVM2 ~]# ps -ef | grep -i workloadmgr-cron
root 15379 14383 0 08:27 pts/0 00:00:00 grep --color=auto -i workloadmgr-cron
mv triliovault-cfg-scripts triliovault-cfg-scripts_old
mv /usr/local/share/kolla-ansible/ansible/roles/triliovault /opt/triliovault_old

git clone -b 4.3.2 https://github.com/trilioData/triliovault-cfg-scripts.git

cd triliovault-cfg-scripts/kolla-ansible/

cp -R ansible/roles/triliovault /usr/local/share/kolla-ansible/ansible/roles/

#Copy the backed-up original globals.yml, which does not have the triliovault variables, over the current globals.yml
cp /opt/globals.yml /etc/kolla/globals.yml
#Append Trilio global variables to globals.yml
cat ansible/triliovault_globals.yml >> /etc/kolla/globals.yml

#Take backup of current password file
cp /etc/kolla/passwords.yml /opt/passwords-<CURRENT-RELEASE>.yml

#Reset the passwords file to the default one by restoring the backed-up original passwords.yml. This backup would have been taken during the previous install/upgrade.
cp /opt/passwords.yml /etc/kolla/passwords.yml
#Append Trilio password variables to passwords.yml
cat ansible/triliovault_passwords.yml >> /etc/kolla/passwords.yml
#Edit /etc/kolla/passwords.yml to set the passwords.
#It is recommended to reuse the same passwords as in the previous T4O deployment, as present in the password file backup (/opt/passwords-<CURRENT-RELEASE>.yml).
#Any additional passwords (in triliovault_passwords.yml) should be set by the user in /etc/kolla/passwords.yml.

#Take backup of current site.yml file
cp /usr/local/share/kolla-ansible/ansible/site.yml /opt/site-<CURRENT-RELEASE>.yml
#Reset site.yml to the default one by restoring the backed-up original site.yml over the current site.yml. This backup would have been taken during the previous install/upgrade.
cp /opt/site.yml /usr/local/share/kolla-ansible/ansible/site.yml
# If the OpenStack release is 'yoga' append below Trilio code to site.yml
cat ansible/triliovault_site_yoga.yml >> /usr/local/share/kolla-ansible/ansible/site.yml
# If the OpenStack release is other than 'yoga' append below Trilio code to site.yml
cat ansible/triliovault_site.yml >> /usr/local/share/kolla-ansible/ansible/site.yml

For example:
If your inventory file path is '/root/multinode', then use the following commands.

#Cleanup old T4O groups from /root/multinode and copy the latest triliovault inventory file
cat ansible/triliovault_inventory.txt >> /root/multinode

cd triliovault-cfg-scripts/common/

pip3 install -U pyyaml

python ./generate_nfs_map.py

cat triliovault_nfs_map_output.yml >> ../kolla-ansible/ansible/triliovault_globals.yml

nova_libvirt_default_volumes:
- "{{ node_config_directory }}/nova-libvirt/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "/run/:/run/:shared"
- "/dev:/dev"
- "/sys/fs/cgroup:/sys/fs/cgroup"
- "kolla_logs:/var/log/kolla/"
- "libvirtd:/var/lib/libvirt"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "
{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}
"
- "nova_libvirt_qemu:/etc/libvirt/qemu"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }
- "/var/trilio:/var/trilio:shared"nova_compute_default_volumes:
- "{{ node_config_directory }}/nova-compute/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "/run:/run:shared"
- "/dev:/dev"
- "kolla_logs:/var/log/kolla/"
- "
{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
- "libvirtd:/var/lib/libvirt"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}
"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
- "/var/trilio:/var/trilio:shared"nova_compute_ironic_default_volumes:
- "{{ node_config_directory }}/nova-compute-ironic/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
- "/var/trilio:/var/trilio:shared"docker.io/trilio/kolla-{{ kolla_base_distro }}-trilio-datamover:{{ triliovault_tag }}
docker.io/trilio/kolla-{{ kolla_base_distro }}-trilio-datamover-api:{{ triliovault_tag }}
docker.io/trilio/kolla-{{ kolla_base_distro }}-trilio-horizon-plugin:{{ triliovault_tag }}
## EXAMPLE from Kolla Ubuntu source based OpenStack
docker.io/trilio/kolla-ubuntu-trilio-datamover:{{ triliovault_tag }}
docker.io/trilio/kolla-ubuntu-trilio-datamover-api:{{ triliovault_tag }}
docker.io/trilio/kolla-ubuntu-trilio-horizon-plugin:{{ triliovault_tag }}

docker.io/trilio/kolla-{{ kolla_base_distro }}-trilio-datamover:{{ triliovault_tag }}
docker.io/trilio/kolla-{{ kolla_base_distro }}-trilio-datamover-api:{{ triliovault_tag }}
docker.io/trilio/{{ kolla_base_distro }}-binary-trilio-horizon-plugin:{{ triliovault_tag }}
## EXAMPLE from Kolla Ubuntu binary based OpenStack
docker.io/trilio/kolla-ubuntu-trilio-datamover:{{ triliovault_tag }}
docker.io/trilio/kolla-ubuntu-trilio-datamover-api:{{ triliovault_tag }}
docker.io/trilio/ubuntu-binary-trilio-horizon-plugin:{{ triliovault_tag }}

ansible -i multinode control -m shell -a "docker login -u <docker-login-username> -p <docker-login-password> docker.io"

kolla-ansible -i multinode pull --tags triliovault

kolla-ansible -i multinode upgrade

[controller] docker ps | grep "trilio-"
a2a3593f76db trilio/kolla-centos-trilio-datamover-api:<triliovault_tag> "dumb-init --single-…" 23 hours ago Up 23 hours triliovault_datamover_api
5f573caa7b02 trilio/kolla-centos-trilio-horizon-plugin:<triliovault_tag> "dumb-init --single-…" 23 hours ago Up 23 hours horizon
[compute] docker ps | grep "trilio-"
f6d443c2942c trilio/kolla-centos-trilio-datamover:<triliovault_tag> "dumb-init --single-…" 23 hours ago Up 23 hours triliovault_datamover

retries 5
timeout http-request 10m
timeout queue 10m
timeout connect 10m
timeout client 10m
timeout server 10m
timeout check 10m
balance roundrobin
maxconn 50000

/usr/local/share/kolla-ansible/ansible/roles/triliovault/defaults/main.yml

/etc/kolla/haproxy/services.d/triliovault-datamover-api.cfg

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 15:44:43 GMT
Content-Type: application/json
Content-Length: 342
Connection: keep-alive
X-Compute-Request-Id: req-5bf629fe-ffa2-4c90-b704-5178ba2ab09b
{
"quota_type":{
"created_at":"2020-10-19T10:05:52.000000",
"updated_at":"2020-10-19T10:07:32.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"1c5d4290-2e08-11ea-889c-7440bb00b67d",
"display_name":"Workloads",
"display_description":"Total number of workload creation allowed per project",
"status":"available"
}
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 15:51:51 GMT
Content-Type: application/json
Content-Length: 24
Connection: keep-alive
X-Compute-Request-Id: req-08c8cdb6-b249-4650-91fb-79a6f7497927
{
"allowed_quotas":[
{
}
]
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 16:01:39 GMT
Content-Type: application/json
Content-Length: 766
Connection: keep-alive
X-Compute-Request-Id: req-e570ce15-de0d-48ac-a9e8-60af429aebc0
{
"allowed_quotas":[
{
"id":"262b117d-e406-4209-8964-004b19a8d422",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"quota_type_id":"1c5d4290-2e08-11ea-889c-7440bb00b67d",
"allowed_value":5,
"high_watermark":4,
"version":"4.0.115",
"quota_type_name":"Workloads"
},
{
"id":"68e7203d-8a38-4776-ba58-051e6d289ee0",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"quota_type_id":"f02dd7a6-2e08-11ea-889c-7440bb00b67d",
"allowed_value":-1,
"high_watermark":-1,
"version":"4.0.115",
"quota_type_name":"Storage"
},
{
"id":"ed67765b-aea8-4898-bb1c-7c01ecb897d2",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"quota_type_id":"be323f58-2e08-11ea-889c-7440bb00b67d",
"allowed_value":50,
"high_watermark":25,
"version":"4.0.115",
"quota_type_name":"VMs"
}
]
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 16:15:07 GMT
Content-Type: application/json
Content-Length: 268
Connection: keep-alive
X-Compute-Request-Id: req-d87a57cd-c14c-44dd-931e-363158376cb7
{
"allowed_quotas":{
"id":"262b117d-e406-4209-8964-004b19a8d422",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"quota_type_id":"1c5d4290-2e08-11ea-889c-7440bb00b67d",
"allowed_value":5,
"high_watermark":4,
"version":"4.0.115",
"quota_type_name":"Workloads"
}
}

HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 16:24:04 GMT
Content-Type: application/json
Content-Length: 24
Connection: keep-alive
X-Compute-Request-Id: req-a4c02ee5-b86e-4808-92ba-c363b287f1a2
{"allowed_quotas": [{}]}HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 16:33:09 GMT
Content-Type: text/html; charset=UTF-8
Content-Length: 0
Connection: keep-alive

{
"allowed_quotas":[
{
"project_id":"<project_id>",
"quota_type_id":"<quota_type_id>",
"allowed_value":"<integer>",
"high_watermark":"<Integer>"
}
]
}

{
"allowed_quotas":{
"project_id":"c76b3355a164498aa95ddbc960adc238",
"allowed_value":"20000",
"high_watermark":"18000"
}
}
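Both allowed-quota bodies above are sent like the other API calls in this document. A minimal sketch for the update variant follows; the host, endpoint path and token are placeholders since the exact URL is not shown here:

## Hypothetical example: update the allowed quota values for a project
curl -i -X PUT https://<wlm_api_endpoint>/v1/<tenant_id>/<allowed_quotas_endpoint> \
     -H "X-Auth-Token: <token>" \
     -H "Content-Type: application/json" \
     -d '{"allowed_quotas":{"project_id":"c76b3355a164498aa95ddbc960adc238","allowed_value":"20000","high_watermark":"18000"}}'

HTTP/1.1 200 OK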
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 10:34:10 GMT
Content-Type: application/json
Content-Length: 7888
Connection: keep-alive
X-Compute-Request-Id: req-9d73e5e6-ca5a-4c07-bdf2-ec2e688fc339
{
"workloads":[
{
"created_at":"2020-11-02T13:40:06.000000",
"updated_at":"2020-11-09T09:53:30.000000",
"id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"availability_zone":"nova",
"workload_type_id":"f82ce76f-17fe-438b-aa37-7a023058e50d",
"name":"Workload_1",
"description":"no-description",
"interval":null,
"storage_usage":null,
"instances":null,
"metadata":[
{
"created_at":"2020-11-09T09:57:23.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"ee27bf14-e460-454b-abf5-c17e3d484ec2",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"63cd8d96-1c4a-4e61-b1e0-3ae6a17bf533",
"value":"c8468146-8117-48a4-bfd7-49381938f636"
},
{
"created_at":"2020-11-05T10:27:06.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"22d3e3d6-5a37-48e9-82a1-af2dda11f476",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"67d6a100-fee6-4aa5-83a1-66b070d2eabe",
"value":"1fb104bf-7e2b-4cb6-84f6-96aabc8f1dd2"
},
{
"created_at":"2020-11-09T09:37:20.000000",
"updated_at":"2020-11-09T09:57:23.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"61615532-6165-45a2-91e2-fbad9eb0b284",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"b083bb70-e384-4107-b951-8e9e7bbac380",
"value":"c8468146-8117-48a4-bfd7-49381938f636"
},
{
"created_at":"2020-11-02T13:40:24.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"5a53c8ee-4482-4d6a-86f2-654d2b06e28c",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"backup_media_target",
"value":"10.10.2.20:/upstream"
},
{
"created_at":"2020-11-05T10:27:14.000000",
"updated_at":"2020-11-09T09:57:23.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"5cb4dc86-a232-4916-86bf-42a0d17f1439",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"e33c1eea-c533-4945-864d-0da1fc002070",
"value":"c8468146-8117-48a4-bfd7-49381938f636"
},
{
"created_at":"2020-11-02T13:40:06.000000",
"updated_at":"2020-11-02T14:10:30.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"506cd466-1e15-416f-9f8e-b9bdb942f3e1",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"hostnames",
"value":"[\"cirros-1\", \"cirros-2\"]"
},
{
"created_at":"2020-11-02T13:40:06.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"093a1221-edb6-4957-8923-cf271f7e43ce",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"pause_at_snapshot",
"value":"0"
},
{
"created_at":"2020-11-02T13:40:06.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"79baaba8-857e-410f-9d2a-8b14670c4722",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"policy_id",
"value":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd"
},
{
"created_at":"2020-11-02T13:40:06.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"4e23fa3d-1a79-4dc8-86cb-dc1ecbd7008e",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"preferredgroup",
"value":"[]"
},
{
"created_at":"2020-11-02T14:10:30.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"ed06cca6-83d8-4d4c-913b-30c8b8418b80",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"topology",
"value":"\"\\\"\\\"\""
},
{
"created_at":"2020-11-02T13:40:23.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"4b6a80f7-b011-48d4-b5fd-f705448de076",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"workload_approx_backup_size",
"value":"6"
}
],
"jobschedule":"(dp0\nVfullbackup_interval\np1\nV-1\np2\nsVretention_policy_type\np3\nVNumber of Snapshots to Keep\np4\nsVend_date\np5\nVNo End\np6\nsVstart_time\np7\nV01:45 PM\np8\nsVinterval\np9\nV5\np10\nsVenabled\np11\nI00\nsVretention_policy_value\np12\nV10\np13\nsVtimezone\np14\nVUTC\np15\nsVstart_date\np16\nV11/02/2020\np17\nsVappliance_timezone\np18\nVUTC\np19\ns.",
"status":"locked",
"error_msg":null,
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/4dfe98a43bfa404785a812020066b4d6/workloads/18b809de-d7c8-41e2-867d-4a306407fb11"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/4dfe98a43bfa404785a812020066b4d6/workloads/18b809de-d7c8-41e2-867d-4a306407fb11"
}
],
"scheduler_trust":null
}
]
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 10:42:01 GMT
Content-Type: application/json
Content-Length: 120143
Connection: keep-alive
X-Compute-Request-Id: req-b443f6e7-8d8e-413f-8d91-7c30ba166e8c
{
"workloads":[
{
"created_at":"2019-04-24T14:09:20.000000",
"updated_at":"2019-05-16T09:10:17.000000",
"id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"user_id":"6ef8135faedc4259baac5871e09f0044",
"project_id":"863b6e2a8e4747f8ba80fdce1ccf332e",
"availability_zone":"nova",
"workload_type_id":"f82ce76f-17fe-438b-aa37-7a023058e50d",
"name":"comdirect_test",
"description":"Daily UNIX Backup 03:15 PM Full 7D Keep 8",
"interval":null,
"storage_usage":null,
"instances":null,
"metadata":[
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":false,
"created_at":"2019-05-16T09:13:54.000000",
"updated_at":null,
"value":"ca544215-1182-4a8f-bf81-910f5470887a",
"version":"3.2.46",
"key":"40965cbb-d352-4618-b8b0-ea064b4819bb",
"deleted_at":null,
"id":"5184260e-8bb3-4c52-abfa-1adc05fe6997"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":true,
"created_at":"2019-04-24T14:09:30.000000",
"updated_at":"2019-05-16T09:01:23.000000",
"value":"10.10.2.20:/upstream",
"version":"3.2.46",
"key":"backup_media_target",
"deleted_at":"2019-05-16T09:01:23.000000",
"id":"02dd0630-7118-485c-9e42-b01d23aa882c"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":false,
"created_at":"2019-05-16T09:13:51.000000",
"updated_at":null,
"value":"51693eca-8714-49be-b409-f1f1709db595",
"version":"3.2.46",
"key":"eb7d6b13-21e4-45d1-b888-d3978ab37216",
"deleted_at":null,
"id":"4b79a4ef-83d6-4e5a-afb3-f4e160c5f257"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":true,
"created_at":"2019-04-24T14:09:20.000000",
"updated_at":"2019-05-16T09:01:23.000000",
"value":"[\"Comdirect_test-2\", \"Comdirect_test-1\"]",
"version":"3.2.46",
"key":"hostnames",
"deleted_at":"2019-05-16T09:01:23.000000",
"id":"0cb6a870-8f30-4325-a4ce-e9604370198e"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":false,
"created_at":"2019-04-24T14:09:20.000000",
"updated_at":"2019-05-16T09:01:23.000000",
"value":"0",
"version":"3.2.46",
"key":"pause_at_snapshot",
"deleted_at":null,
"id":"5d4f109c-9dc2-48f3-a12a-e8b8fa4f5be9"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":true,
"created_at":"2019-04-24T14:09:20.000000",
"updated_at":"2019-05-16T09:01:23.000000",
"value":"[]",
"version":"3.2.46",
"key":"preferredgroup",
"deleted_at":"2019-05-16T09:01:23.000000",
"id":"9a223fbc-7cad-4c2c-ae8a-75e6ee8a6efc"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":true,
"created_at":"2019-04-24T14:11:49.000000",
"updated_at":"2019-05-16T09:01:23.000000",
"value":"\"\\\"\\\"\"",
"version":"3.2.46",
"key":"topology",
"deleted_at":"2019-05-16T09:01:23.000000",
"id":"77e436c0-0921-4919-97f4-feb58fb19e06"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":true,
"created_at":"2019-04-24T14:09:30.000000",
"updated_at":"2019-05-16T09:01:23.000000",
"value":"121",
"version":"3.2.46",
"key":"workload_approx_backup_size",
"deleted_at":"2019-05-16T09:01:23.000000",
"id":"79aa04dd-a102-4bd8-b672-5b7a6ce9e125"
}
],
"jobschedule":"(dp1\nVfullbackup_interval\np2\nV7\nsVretention_policy_type\np3\nVNumber of days to retain Snapshots\np4\nsVend_date\np5\nV05/31/2019\np6\nsVstart_time\np7\nS'02:15 PM'\np8\nsVinterval\np9\nV24 hrs\np10\nsVenabled\np11\nI01\nsVretention_policy_value\np12\nI8\nsS'appliance_timezone'\np13\nS'UTC'\np14\nsVtimezone\np15\nVAfrica/Porto-Novo\np16\nsVstart_date\np17\nS'04/24/2019'\np18\ns.",
"status":"locked",
"error_msg":null,
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/4dfe98a43bfa404785a812020066b4d6/workloads/orphan_workloads/4dfe98a43bfa404785a812020066b4d6/workloads/0ed39f25-5df2-4cc5-820f-2af2cde6aa67"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/4dfe98a43bfa404785a812020066b4d6/workloads/orphan_workloads/4dfe98a43bfa404785a812020066b4d6/workloads/0ed39f25-5df2-4cc5-820f-2af2cde6aa67"
}
],
"scheduler_trust":null
}
]
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 11:03:55 GMT
Content-Type: application/json
Content-Length: 100
Connection: keep-alive
X-Compute-Request-Id: req-0e58b419-f64c-47e1-adb9-21ea2a255839
{
"workloads":{
"imported_workloads":[
"faa03-f69a-45d5-a6fc-ae0119c77974"
],
"failed_workloads":[
]
}
}

{
"workload_ids":[
"<workload_id>"
],
"upgrade":true
}
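A minimal sketch of submitting the import body above with curl; the host, endpoint path and token are placeholders since the exact import URL is not shown here:

## Hypothetical example: import the listed workloads
curl -i -X POST https://<wlm_api_endpoint>/v1/<tenant_id>/<workload_import_endpoint> \
     -H "X-Auth-Token: <token>" \
     -H "Content-Type: application/json" \
     -d '{"workload_ids":["<workload_id>"],"upgrade":true}'

HTTP/1.1 200 OK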
Server: nginx/1.16.1
Date: Tue, 22 Aug 2023 11:03:55 GMT
Content-Type: application/json
Content-Length: 100
Connection: keep-alive
X-Compute-Request-Id: req-0e58b419-f64c-47e1-adb9-21ea2a255839
{
   "jobid":[
      {
         "id":"1",
         "created_at":"22nd Aug 2023",
         "wllist":[
            {
               "id":"123",
               "name":"Test-WL-01",
               "progress":10
            },
            {
               "id":"124",
               "name":"Test-WL-02",
               "progress":25
            }
         ],
         "completedat":"22nd Aug 2023",
         "status":"In-Progress"
      }
   ]
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 29 Oct 2020 14:55:40 GMT
Content-Type: application/json
Content-Length: 3480
Connection: keep-alive
X-Compute-Request-Id: req-a2e49b7e-ce0f-4dcb-9e61-c5a4756d9948
{
"workloads":[
{
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"id":"8ee7a61d-a051-44a7-b633-b495e6f8fc1d",
"name":"worklaod1",
"snapshots_info":"",
"description":"no-description",
"workload_type_id":"f82ce76f-17fe-438b-aa37-7a023058e50d",
"status":"available",
"created_at":"2020-10-26T12:07:01.000000",
"updated_at":"2020-10-29T12:22:26.000000",
"scheduler_trust":null,
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/4dfe98a43bfa404785a812020066b4d6/workloads/8ee7a61d-a051-44a7-b633-b495e6f8fc1d"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/4dfe98a43bfa404785a812020066b4d6/workloads/8ee7a61d-a051-44a7-b633-b495e6f8fc1d"
}
]
},
{
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"id":"a90d002a-85e4-44d1-96ac-7ffc5d0a5a84",
"name":"workload2",
"snapshots_info":"",
"description":"no-description",
"workload_type_id":"f82ce76f-17fe-438b-aa37-7a023058e50d",
"status":"available",
"created_at":"2020-10-20T09:51:15.000000",
"updated_at":"2020-10-29T10:03:33.000000",
"scheduler_trust":null,
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/4dfe98a43bfa404785a812020066b4d6/workloads/a90d002a-85e4-44d1-96ac-7ffc5d0a5a84"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/4dfe98a43bfa404785a812020066b4d6/workloads/a90d002a-85e4-44d1-96ac-7ffc5d0a5a84"
}
]
}
]
}

HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Thu, 29 Oct 2020 15:42:02 GMT
Content-Type: application/json
Content-Length: 703
Connection: keep-alive
X-Compute-Request-Id: req-443b9dea-36e6-4721-a11b-4dce3c651ede
{
"workload":{
"project_id":"c76b3355a164498aa95ddbc960adc238",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"id":"c4e3aeeb-7d87-4c49-99ed-677e51ba715e",
"name":"API created",
"snapshots_info":"",
"description":"API description",
"workload_type_id":"f82ce76f-17fe-438b-aa37-7a023058e50d",
"status":"creating",
"created_at":"2020-10-29T15:42:01.000000",
"updated_at":"2020-10-29T15:42:01.000000",
"scheduler_trust":null,
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/c76b3355a164498aa95ddbc960adc238/workloads/c4e3aeeb-7d87-4c49-99ed-677e51ba715e"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/c76b3355a164498aa95ddbc960adc238/workloads/c4e3aeeb-7d87-4c49-99ed-677e51ba715e"
}
]
}
}

retention_policy_type
retention_policy_value
interval

{
"workload":{
"name":"<name of the Workload>",
"description":"<description of workload>",
"workload_type_id":"<ID of the chosen Workload Type",
"source_platform":"openstack",
"instances":[
{
"instance-id":"<Instance ID>"
},
{
"instance-id":"<Instance ID>"
}
],
"jobschedule":{
"retention_policy_type":"<'Number of Snapshots to Keep'/'Number of days to retain Snapshots'>",
"retention_policy_value":"<Integer>"
"timezone":"<timezone>",
"start_date":"<Date format: MM/DD/YYYY>"
"end_date":"<Date format MM/DD/YYYY>",
"start_time":"<Time format: HH:MM AM/PM>",
"interval":"<Format: Integer hr",
"enabled":"<True/False>"
},
"metadata":{
<key>:<value>,
"policy_id":"<policy_id>"
}
}
}
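A minimal sketch of submitting the workload-create template above with curl. The /v1/<tenant_id>/workloads path matches the pattern visible in the response links; the host, token and the workload_create.json file holding the template are placeholders:

## Example: create a workload from the JSON template above
curl -i -X POST https://<wlm_api_endpoint>/v1/<tenant_id>/workloads \
     -H "X-Auth-Token: <token>" \
     -H "Content-Type: application/json" \
     -d @workload_create.json

HTTP/1.1 200 OK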
Server: nginx/1.16.1
Date: Mon, 02 Nov 2020 12:08:42 GMT
Content-Type: application/json
Content-Length: 1536
Connection: keep-alive
X-Compute-Request-Id: req-afb76abb-aa33-427e-8219-04fc2b91bce0
{
"workload":{
"created_at":"2020-10-29T15:42:01.000000",
"updated_at":"2020-10-29T15:42:18.000000",
"id":"c4e3aeeb-7d87-4c49-99ed-677e51ba715e",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"availability_zone":"nova",
"workload_type_id":"f82ce76f-17fe-438b-aa37-7a023058e50d",
"name":"API created",
"description":"API description",
"interval":null,
"storage_usage":{
"usage":0,
"full":{
"snap_count":0,
"usage":0
},
"incremental":{
"snap_count":0,
"usage":0
}
},
"instances":[
{
"id":"08dab61c-6efd-44d3-a9ed-8e789d338c1b",
"name":"cirros-4",
"metadata":{
}
},
{
"id":"7c1bb5d2-aa5a-44f7-abcd-2d76b819b4c8",
"name":"cirros-3",
"metadata":{
}
}
],
"metadata":{
"hostnames":"[]",
"meta":"data",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"preferredgroup":"[]",
"workload_approx_backup_size":"6"
},
"jobschedule":{
"retention_policy_type":"Number of Snapshots to Keep",
"end_date":"15/27/2020",
"start_time":"3:00 PM",
"interval":"5",
"enabled":false,
"retention_policy_value":"10",
"timezone":"UTC+2",
"start_date":"10/27/2020",
"fullbackup_interval":"-1",
"appliance_timezone":"UTC",
"global_jobscheduler":true
},
"status":"available",
"error_msg":null,
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/c76b3355a164498aa95ddbc960adc238/workloads/c4e3aeeb-7d87-4c49-99ed-677e51ba715e"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/c76b3355a164498aa95ddbc960adc238/workloads/c4e3aeeb-7d87-4c49-99ed-677e51ba715e"
}
],
"scheduler_trust":null
}
}

HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Mon, 02 Nov 2020 12:31:42 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-674a5d71-4aeb-4f99-90ce-7e8d3158d137

retention_policy_type
retention_policy_value
interval

{
"workload":{
"name":"<name>",
"description":"<description>"
"instances":[
{
"instance-id":"<instance_id>"
},
{
"instance-id":"<instance_id>"
}
],
"jobschedule":{
"retention_policy_type":"<'Number of Snapshots to Keep'/'Number of days to retain Snapshots'>",
"retention_policy_value":"<Integer>",
"timezone":"<timezone>",
"start_time":"<HH:MM AM/PM>",
"end_date":"<MM/DD/YYYY>",
"interval":"<Integer hr>",
"enabled":"<True/False>"
},
"metadata":{
"meta":"data",
"policy_id":"<policy_id>"
}
}
}
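A minimal sketch of submitting the workload-modify template above with curl. The /v1/<tenant_id>/workloads/<workload_id> path matches the pattern visible in the response links; the host, token and the workload_modify.json file holding the template are placeholders:

## Example: modify an existing workload using the JSON template above
curl -i -X PUT https://<wlm_api_endpoint>/v1/<tenant_id>/workloads/<workload_id> \
     -H "X-Auth-Token: <token>" \
     -H "Content-Type: application/json" \
     -d @workload_modify.json

HTTP/1.1 202 Accepted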
Server: nginx/1.16.1
Date: Mon, 02 Nov 2020 13:31:00 GMT
Content-Type: text/html; charset=UTF-8
Content-Length: 0
Connection: keep-alive

HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Mon, 02 Nov 2020 13:41:55 GMT
Content-Type: text/html; charset=UTF-8
Content-Length: 0
Connection: keep-alive

HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Mon, 02 Nov 2020 13:52:30 GMT
Content-Type: text/html; charset=UTF-8
Content-Length: 0
Connection: keep-alive

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 04 Nov 2020 12:58:38 GMT
Content-Type: application/json
Content-Length: 266
Connection: keep-alive
X-Compute-Request-Id: req-ed391cf9-aa56-4c53-8153-fd7fb238c4b9
{
"snapshots":[
{
"id":"1ff16412-a0cd-4e6a-9b4a-b5d4440fffc4",
"created_at":"2020-11-02T14:03:18.000000",
"status":"available",
"snapshot_type":"full",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"name":"snapshot",
"description":"-",
"host":"TVM1"
}
]
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 04 Nov 2020 13:58:38 GMT
Content-Type: application/json
Content-Length: 283
Connection: keep-alive
X-Compute-Request-Id: req-fb8dc382-e5de-4665-8d88-c75b2e473f5c
{
"snapshot":{
"id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"created_at":"2020-11-04T13:58:37.694637",
"status":"creating",
"snapshot_type":"full",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"name":"API taken 2",
"description":"API taken description 2",
"host":""
}
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 04 Nov 2020 14:07:18 GMT
Content-Type: application/json
Content-Length: 6609
Connection: keep-alive
X-Compute-Request-Id: req-f88fb28f-f4ce-4585-9c3c-ebe08a3f60cd
{
"snapshot":{
"id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"created_at":"2020-11-04T13:58:37.000000",
"updated_at":"2020-11-04T14:06:03.000000",
"finished_at":"2020-11-04T14:06:03.000000",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"status":"available",
"snapshot_type":"full",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"instances":[
{
"id":"67d6a100-fee6-4aa5-83a1-66b070d2eabe",
"name":"cirros-2",
"status":"available",
"metadata":{
"availability_zone":"nova",
"config_drive":"",
"data_transfer_time":"0",
"object_store_transfer_time":"0",
"root_partition_type":"Linux",
"trilio_ordered_interfaces":"192.168.100.80",
"vm_metadata":"{\"workload_name\": \"Workload_1\", \"workload_id\": \"18b809de-d7c8-41e2-867d-4a306407fb11\", \"trilio_ordered_interfaces\": \"192.168.100.80\", \"config_drive\": \"\"}",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"workload_name":"Workload_1"
},
"flavor":{
"vcpus":"1",
"ram":"512",
"disk":"1",
"ephemeral":"0"
},
"security_group":[
{
"name":"default",
"security_group_type":"neutron"
}
],
"nics":[
{
"mac_address":"fa:16:3e:cf:10:91",
"ip_address":"192.168.100.80",
"network":{
"id":"5fb7027d-a2ac-4a21-9ee1-438c281d2b26",
"name":"robert_internal",
"cidr":null,
"network_type":"neutron",
"subnet":{
"id":"b7b54304-aa82-4d50-91e6-66445ab56db4",
"name":"robert_internal",
"cidr":"192.168.100.0/24",
"ip_version":4,
"gateway_ip":"192.168.100.1"
}
}
}
],
"vdisks":[
{
"label":null,
"resource_id":"fa888089-5715-4228-9e5a-699f8f9d59ba",
"restore_size":1073741824,
"vm_id":"67d6a100-fee6-4aa5-83a1-66b070d2eabe",
"volume_id":"51491d30-9818-4332-b056-1f174e65d3e3",
"volume_name":"51491d30-9818-4332-b056-1f174e65d3e3",
"volume_size":"1",
"volume_type":"iscsi",
"volume_mountpoint":"/dev/vda",
"availability_zone":"nova",
"metadata":{
"readonly":"False",
"attached_mode":"rw"
}
}
]
},
{
"id":"e33c1eea-c533-4945-864d-0da1fc002070",
"name":"cirros-1",
"status":"available",
"metadata":{
"availability_zone":"nova",
"config_drive":"",
"data_transfer_time":"0",
"object_store_transfer_time":"0",
"root_partition_type":"Linux",
"trilio_ordered_interfaces":"192.168.100.176",
"vm_metadata":"{\"workload_name\": \"Workload_1\", \"workload_id\": \"18b809de-d7c8-41e2-867d-4a306407fb11\", \"trilio_ordered_interfaces\": \"192.168.100.176\", \"config_drive\": \"\"}",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"workload_name":"Workload_1"
},
"flavor":{
"vcpus":"1",
"ram":"512",
"disk":"1",
"ephemeral":"0"
},
"security_group":[
{
"name":"default",
"security_group_type":"neutron"
}
],
"nics":[
{
"mac_address":"fa:16:3e:cf:4d:27",
"ip_address":"192.168.100.176",
"network":{
"id":"5fb7027d-a2ac-4a21-9ee1-438c281d2b26",
"name":"robert_internal",
"cidr":null,
"network_type":"neutron",
"subnet":{
"id":"b7b54304-aa82-4d50-91e6-66445ab56db4",
"name":"robert_internal",
"cidr":"192.168.100.0/24",
"ip_version":4,
"gateway_ip":"192.168.100.1"
}
}
}
],
"vdisks":[
{
"label":null,
"resource_id":"c8293bb0-031a-4d33-92ee-188380211483",
"restore_size":1073741824,
"vm_id":"e33c1eea-c533-4945-864d-0da1fc002070",
"volume_id":"365ad75b-ca76-46cb-8eea-435535fd2e22",
"volume_name":"365ad75b-ca76-46cb-8eea-435535fd2e22",
"volume_size":"1",
"volume_type":"iscsi",
"volume_mountpoint":"/dev/vda",
"availability_zone":"nova",
"metadata":{
"readonly":"False",
"attached_mode":"rw"
}
}
]
}
],
"name":"API taken 2",
"description":"API taken description 2",
"host":"TVM1",
"size":44171264,
"restore_size":2147483648,
"uploaded_size":44171264,
"progress_percent":100,
"progress_msg":"Snapshot of workload is complete",
"warning_msg":null,
"error_msg":null,
"time_taken":428,
"pinned":false,
"metadata":[
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"16fc1ce5-81b2-4c07-ac63-6c9232e0418f",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"backup_media_target",
"value":"10.10.2.20:/upstream"
},
{
"created_at":"2020-11-04T13:58:37.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"5a56bbad-9957-4fb3-9bbc-469ec571b549",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"cancel_requested",
"value":"0"
},
{
"created_at":"2020-11-04T14:05:29.000000",
"updated_at":"2020-11-04T14:05:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"d36abef7-9663-4d88-8f2e-ef914f068fb4",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"data_transfer_time",
"value":"0"
},
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"c75f9151-ef87-4a74-acf1-42bd2588ee64",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"hostnames",
"value":"[\"cirros-1\", \"cirros-2\"]"
},
{
"created_at":"2020-11-04T14:05:29.000000",
"updated_at":"2020-11-04T14:05:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"02916cce-79a2-4ad9-a7f6-9d9f59aa8424",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"object_store_transfer_time",
"value":"0"
},
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"96efad2f-a24f-4cde-8e21-9cd78f78381b",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"pause_at_snapshot",
"value":"0"
},
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"572a0b21-a415-498f-b7fa-6144d850ef56",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"policy_id",
"value":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd"
},
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"dfd7314d-8443-4a95-8e2a-7aad35ef97ea",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"preferredgroup",
"value":"[]"
},
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"2e17e1e4-4bb1-48a9-8f11-c4cd2cfca2a9",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"topology",
"value":"\"\\\"\\\"\""
},
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"33762790-8743-4e20-9f50-3505a00dbe76",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"workload_approx_backup_size",
"value":"6"
}
],
"restores_info":""
}
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 04 Nov 2020 14:18:36 GMT
Content-Type: application/json
Content-Length: 56
Connection: keep-alive
X-Compute-Request-Id: req-82ffb2b6-b28e-4c73-89a4-310890960dbc
{"task": {"id": "a73de236-6379-424a-abc7-33d553e050b7"}}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 04 Nov 2020 14:26:44 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-47a5a426-c241-429e-9d69-d40aed0dd68d
{
"snapshot":{
"is_scheduled":<true/false>,
"name":"<name>",
"description":"<description>"
}
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 13:56:08 GMT
Content-Type: application/json
Content-Length: 1399
Connection: keep-alive
X-Compute-Request-Id: req-4618161e-64e4-489a-b8fc-f3cb21d94096
{
"policy_list":[
{
"id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":"2020-10-26T12:52:22.000000",
"status":"available",
"name":"Gold",
"description":"",
"metadata":[
],
"field_values":[
{
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"0201f8b4-482d-4ec1-9b92-8cf3092abcc2",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"policy_field_name":"retention_policy_value",
"value":"10"
},
{
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"48cc7007-e221-44de-bd4e-6a66841bdee0",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"policy_field_name":"interval",
"value":"5"
},
{
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"79070c67-9021-4220-8a79-648ffeebc144",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"policy_field_name":"retention_policy_type",
"value":"Number of Snapshots to Keep"
},
{
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"9fec205a-9528-45ea-a118-ffb64d8c7d9d",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"policy_field_name":"fullbackup_interval",
"value":"-1"
}
]
}
]
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 14:18:42 GMT
Content-Type: application/json
Content-Length: 2160
Connection: keep-alive
X-Compute-Request-Id: req-0583fc35-0f80-4746-b280-c17b32cc4b25
{
"policy":{
"id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":"2020-10-26T12:52:22.000000",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"status":"available",
"name":"Gold",
"description":"",
"field_values":[
{
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"0201f8b4-482d-4ec1-9b92-8cf3092abcc2",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"policy_field_name":"retention_policy_value",
"value":"10"
},
{
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"48cc7007-e221-44de-bd4e-6a66841bdee0",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"policy_field_name":"interval",
"value":"5"
},
{
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"79070c67-9021-4220-8a79-648ffeebc144",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"policy_field_name":"retention_policy_type",
"value":"Number of Snapshots to Keep"
},
{
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"9fec205a-9528-45ea-a118-ffb64d8c7d9d",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"policy_field_name":"fullbackup_interval",
"value":"-1"
}
],
"metadata":[
],
"policy_assignments":[
{
"created_at":"2020-10-26T12:53:01.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"3e3f1b12-1b1f-452b-a9d2-b6e5fbf2ab18",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"policy_name":"Gold",
"project_name":"admin"
},
{
"created_at":"2020-10-29T15:39:13.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"8b4a6236-63f1-4e2d-b8d1-23b37f4b4346",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"policy_name":"Gold",
"project_name":"robert"
}
]
}
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 09:14:01 GMT
Content-Type: application/json
Content-Length: 338
Connection: keep-alive
X-Compute-Request-Id: req-57175488-d267-4dcb-90b5-f239d8b02fe2
{
"policies":[
{
"created_at":"2020-10-29T15:39:13.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"8b4a6236-63f1-4e2d-b8d1-23b37f4b4346",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"policy_name":"Gold",
"project_name":"robert"
}
]
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 09:24:03 GMT
Content-Type: application/json
Content-Length: 1413
Connection: keep-alive
X-Compute-Request-Id: req-05e05333-b967-4d4e-9c9b-561f1a7add5a
{
"policy":{
"id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:24:01.000000",
"status":"available",
"name":"CLI created",
"description":"CLI created",
"metadata":[
],
"field_values":[
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"767ae42d-caf0-4d36-963c-9b0e50991711",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"interval",
"value":"4 hr"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"7e34ce5c-3de0-408e-8294-cc091bee281f",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"retention_policy_value",
"value":"10"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"95537f7c-e59a-4365-b1e9-7fa2ed49c677",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"retention_policy_type",
"value":"Number of Snapshots to Keep"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"f635bece-be61-4e72-bce4-bc72a6f549e3",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"fullbackup_interval",
"value":"-1"
}
]
}
}
{
"workload_policy":{
"field_values":{
"fullbackup_interval":"<-1 for never / 0 for always / Integer>",
"retention_policy_type":"<Number of Snapshots to Keep/Number of days to retain Snapshots>",
"interval":"<Integer hr>",
"retention_policy_value":"<Integer>"
},
"display_name":"<String>",
"display_description":"<String>",
"metadata":{
<key>:<value>
}
}
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 09:32:13 GMT
Content-Type: application/json
Content-Length: 1515
Connection: keep-alive
X-Compute-Request-Id: req-9104cf1c-4025-48f5-be92-1a6b7117bf95
{
"policy":{
"id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:24:01.000000",
"status":"available",
"name":"API created",
"description":"API created",
"metadata":[
],
"field_values":[
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"767ae42d-caf0-4d36-963c-9b0e50991711",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"interval",
"value":"8 hr"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"7e34ce5c-3de0-408e-8294-cc091bee281f",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"retention_policy_value",
"value":"20"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"95537f7c-e59a-4365-b1e9-7fa2ed49c677",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"retention_policy_type",
"value":"Number of days to retain Snapshots"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"f635bece-be61-4e72-bce4-bc72a6f549e3",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"fullbackup_interval",
"value":"7"
}
]
}
}
{
"policy":{
"field_values":{
"fullbackup_interval":"<-1 for never / 0 for always / Integer>",
"retention_policy_type":"<Number of Snapshots to Keep/Number of days to retain Snapshots>",
"interval":"<Integer hr>",
"retention_policy_value":"<Integer>"
},
"display_name":"String",
"display_description":"String",
"metadata":{
<key>:<value>
}
}
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 09:46:23 GMT
Content-Type: application/json
Content-Length: 2318
Connection: keep-alive
X-Compute-Request-Id: req-169a53e4-b1c9-4bd1-bf68-3416d177d868
{
"policy":{
"id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:24:01.000000",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"status":"available",
"name":"API created",
"description":"API created",
"field_values":[
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"767ae42d-caf0-4d36-963c-9b0e50991711",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"interval",
"value":"8 hr"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"7e34ce5c-3de0-408e-8294-cc091bee281f",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"retention_policy_value",
"value":"20"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"95537f7c-e59a-4365-b1e9-7fa2ed49c677",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"retention_policy_type",
"value":"Number of days to retain Snapshots"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"f635bece-be61-4e72-bce4-bc72a6f549e3",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"fullbackup_interval",
"value":"7"
}
],
"metadata":[
],
"policy_assignments":[
{
"created_at":"2020-11-17T09:46:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"4794ed95-d8d1-4572-93e8-cebd6d4df48f",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"project_id":"cbad43105e404c86a1cd07c48a737f9c",
"policy_name":"API created",
"project_name":"services"
},
{
"created_at":"2020-11-17T09:46:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"68f187a6-3526-4a35-8b2d-cb0e9f497dd8",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"policy_name":"API created",
"project_name":"robert"
}
]
},
"failed_ids":[
]
}
{
"policy":{
"remove_projects":[
"<project_id>"
],
"add_projects":[
"<project_id>",
]
}
}
HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 09:56:03 GMT
Content-Type: text/html; charset=UTF-8
Content-Length: 0
Connection: keep-alive
workloadmgr restore-list [--snapshot_id <snapshot_id>]
workloadmgr restore-show [--output <output>] <restore_id>
workloadmgr restore-delete <restore_id>
workloadmgr restore-cancel <restore_id>
workloadmgr snapshot-oneclick-restore [--display-name <display-name>]
                                      [--display-description <display-description>]
                                      <snapshot_id>
workloadmgr snapshot-selective-restore [--display-name <display-name>]
                                       [--display-description <display-description>]
                                       [--filename <filename>]
                                       <snapshot_id>
workloadmgr snapshot-inplace-restore [--display-name <display-name>]
                                     [--display-description <display-description>]
                                     [--filename <filename>]
                                     <snapshot_id>
{
oneclickrestore: False,
restore_type: selective,
type: openstack,
openstack:
{
instances:
[
{
include: True,
id: 890888bc-a001-4b62-a25b-484b34ac6e7e,
name: cdcentOS-1,
availability_zone:,
nics: [],
vdisks:
[
{
id: 4cc2b474-1f1b-4054-a922-497ef5564624,
new_volume_type:,
availability_zone: nova
}
],
flavor:
{
ram: 512,
ephemeral: 0,
vcpus: 1,
swap:,
disk: 1,
id: 1
}
}
],
restore_topology: True,
networks_mapping:
{
networks: []
}
}
}
'instances':[
{
'name':'cdcentOS-1-selective',
'availability_zone':'US-East',
'nics':[
{
'mac_address':'fa:16:3e:00:bd:60',
'ip_address':'192.168.0.100',
'id':'8b871820-f92e-41f6-80b4-00555a649b4c',
'network':{
'subnet':{
'id':'2b1506f4-2a7a-4602-a8b9-b7e8a49f95b8'
},
'id':'d5047e84-077e-4b38-bc43-e3360b0ad174'
}
}
],
'vdisks':[
{
'id':'4cc2b474-1f1b-4054-a922-497ef5564624',
'new_volume_type':'ceph',
'availability_zone':'nova'
}
],
'flavor':{
'ram':2048,
'ephemeral':0,
'vcpus':1,
'swap':'',
'disk':20,
'id':'2'
},
'include':True,
'id':'890888bc-a001-4b62-a25b-484b34ac6e7e'
}
]
restore_topology:True
restore_topology:False
{
'oneclickrestore':False,
'openstack':{
'instances':[
{
'name':'cdcentOS-1-selective',
'availability_zone':'US-East',
'nics':[
{
'mac_address':'fa:16:3e:00:bd:60',
'ip_address':'192.168.0.100',
'id':'8b871820-f92e-41f6-80b4-00555a649b4c',
'network':{
'subnet':{
'id':'2b1506f4-2a7a-4602-a8b9-b7e8a49f95b8'
},
'id':'d5047e84-077e-4b38-bc43-e3360b0ad174'
}
}
],
'vdisks':[
{
'id':'4cc2b474-1f1b-4054-a922-497ef5564624',
'new_volume_type':'ceph',
'availability_zone':'nova'
}
],
'flavor':{
'ram':2048,
'ephemeral':0,
'vcpus':1,
'swap':'',
'disk':20,
'id':'2'
},
'include':True,
'id':'890888bc-a001-4b62-a25b-484b34ac6e7e'
}
],
'restore_topology':False,
'networks_mapping':{
'networks':[
{
'snapshot_network':{
'subnet':{
'id':'8b609440-4abf-4acf-a36b-9a0fa70c383c'
},
'id':'8b871820-f92e-41f6-80b4-00555a649b4c'
},
'target_network':{
'subnet':{
'id':'2b1506f4-2a7a-4602-a8b9-b7e8a49f95b8'
},
'id':'d5047e84-077e-4b38-bc43-e3360b0ad174',
'name':'internal'
}
}
]
}
},
'restore_type':'selective',
'type':'openstack'
}
{
'oneclickrestore':False,
'restore_type':'inplace',
'type':'openstack',
'openstack':{
'instances':[
{
'restore_boot_disk':True,
'include':True,
'id':'ba8c27ab-06ed-4451-9922-d919171078de',
'vdisks':[
{
'restore_cinder_volume':True,
'id':'04d66b70-6d7c-4d1b-98e0-11059b89cba6',
}
]
}
]
}
}
ip_address ➡️ IP Address of the Neutron port
ephemeral ➡️ How big the ephemeral disk of the instance will be (in GB)
subnet ➡️ the subnet of the network backed up in the snapshot, contains the following:
cd triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/
cd triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/
cd /home/stack
git clone -b 4.3.2 https://github.com/trilioData/triliovault-cfg-scripts.git
cd triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/
cd triliovault-cfg-scripts/redhat-director-scripts/rhosp13/
cd triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/
cd triliovault-cfg-scripts/redhat-director-scripts/rhosp16.2/
cd triliovault-cfg-scripts/redhat-director-scripts/rhosp17.0/
cp s3-cert.pem /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/puppet/trilio/files
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/scripts/
./upload_puppet_module.sh
## Output of the above command looks like the following for RHOSP13, RHOSP16.1 and RHOSP16.2
Creating tarball...
Tarball created.
Creating heat environment file: /home/stack/.tripleo/environments/puppet-modules-url.yaml
Uploading file to swift: /tmp/puppet-modules-8Qjya2X/puppet-modules.tar.gz
+-----------------------+---------------------+----------------------------------+
| object | container | etag |
+-----------------------+---------------------+----------------------------------+
| puppet-modules.tar.gz | overcloud-artifacts | 368951f6a4d39cfe53b5781797b133ad |
+-----------------------+---------------------+----------------------------------+
## The above command creates the following file.
ls -ll /home/stack/.tripleo/environments/puppet-modules-url.yaml
## The command is the same for RHOSP17.0, but the command output and file content will be different
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp17.0/scripts/
./upload_puppet_module.sh
## Output of the above command looks like the following for RHOSP17.0
Creating tarball...
Tarball created.
renamed '/tmp/puppet-modules-P3duCg9/puppet-modules.tar.gz' -> '/var/lib/tripleo/artifacts/overcloud-artifacts/puppet-modules.tar.gz'
Creating heat environment file: /home/stack/.tripleo/environments/puppet-modules-url.yaml
## The above command creates the following file.
ls -ll /home/stack/.tripleo/environments/puppet-modules-url.yaml
## For RHOSP13, RHOSP16.1 and RHOSP16.2
(undercloud) [stack@ucloud161 ~]$ cat /home/stack/.tripleo/environments/puppet-modules-url.yaml
# Heat environment to deploy artifacts via Swift Temp URL(s)
parameter_defaults:
DeployArtifactURLs:
- 'http://172.25.0.103:8080/v1/AUTH_46ba596219d143c8b076e9fcc4139fed/overcloud-artifacts/puppet-modules.tar.gz?temp_url_sig=c3972b7ce75226c278ab3fa8237d31cc1f2115bd&temp_url_expires=1646738377'
## For RHOSP17.0
(undercloud) [stack@undercloud17-3 scripts]$ cat /home/stack/.tripleo/environments/puppet-modules-url.yaml
parameter_defaults:
DeployArtifactFILEs:
- /var/lib/tripleo/artifacts/overcloud-artifacts/puppet-modules.tar.gz
(undercloud) [stack@ucloud161 ~]$ cat /home/stack/.tripleo/environments/puppet-modules-url.yaml | grep http >> /home/stack/templates/user-artifacts.yaml
(undercloud) [stack@ucloud161 ~]$ cat /home/stack/templates/user-artifacts.yaml
# Heat environment to deploy artifacts via Swift Temp URL(s)
parameter_defaults:
DeployArtifactURLs:
- 'http://172.25.0.103:8080/v1/AUTH_57ba596219d143c8b076e9fcc4139f3g/overcloud-artifacts/some-artifact.tar.gz?temp_url_sig=dc972b7ce75226c278ab3fa8237d31cc1f2115sc&temp_url_expires=3446738365'
- 'http://172.25.0.103:8080/v1/AUTH_46ba596219d143c8b076e9fcc4139fed/overcloud-artifacts/puppet-modules.tar.gz?temp_url_sig=c3972b7ce75226c278ab3fa8237d31cc1f2115bd&temp_url_expires=1646738377'
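If a combined artifacts file like the one above is maintained under /home/stack/templates, it must be included in the deployment like any other environment file, for example with an extra -e argument on the overcloud deploy command (a sketch following the deploy examples later in this section; files under /home/stack/.tripleo/environments are instead picked up automatically by the client):
openstack overcloud deploy --templates \
  <other environment files> \
  -e /home/stack/templates/user-artifacts.yaml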
'OS::TripleO::Services::TrilioDatamoverApi'
'OS::TripleO::Services::TrilioDatamover'
OS::TripleO::Services::TrilioHorizon
Trilio Datamover container: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
Trilio Datamover Api Container: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
Trilio horizon plugin: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13
Trilio Datamover container: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
Trilio Datamover Api Container: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
Trilio horizon plugin: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
Trilio Datamover container: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
Trilio Datamover Api Container: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
Trilio horizon plugin: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2
Trilio Datamover container: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp17.0
Trilio Datamover Api Container: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp17.0
Trilio horizon plugin: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp17.0
parameter_defaults:
ContainerImagePrepare:
- push_destination: false
set:
namespace: registry.redhat.io/...
...
...
ContainerImageRegistryCredentials:
registry.redhat.io:
myuser: 'p@55w0rd!'
registry.connect.redhat.com:
myuser: 'p@55w0rd!'
ContainerImageRegistryLogin: true
# For RHOSP13
$ grep '<HOTFIX-TAG-VERSION>-rhosp13' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
DockerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13
# For RHOSP16.1
$ grep '<HOTFIX-TAG-VERSION>-rhosp16.1' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
# For RHOSP16.2
$ grep '<HOTFIX-TAG-VERSION>-rhosp16.2' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
ContainerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2
# For RHOSP17.0
$ grep '<HOTFIX-TAG-VERSION>-rhosp17.0' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp17.0
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp17.0
ContainerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp17.0
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp13/scripts/
./prepare_trilio_images.sh <undercloud_ip/hostname> <HOTFIX-TAG-VERSION>-rhosp13
## Verify changes
$ grep '<HOTFIX-TAG-VERSION>-rhosp13' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: 172.25.2.2:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
DockerTrilioDmApiImage: 172.25.2.2:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
DockerHorizonImage: 172.25.2.2:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13
$ docker image list | grep <HOTFIX-TAG-VERSION>-rhosp13
172.30.5.101:8787/trilio/trilio-datamover <HOTFIX-TAG-VERSION>-rhosp13 f2dfb36bb176 8 weeks ago 3.61 GB
registry.connect.redhat.com/trilio/trilio-datamover <HOTFIX-TAG-VERSION>-rhosp13 f2dfb36bb176 8 weeks ago 3.61 GB
172.30.5.101:8787/trilio/trilio-datamover-api <HOTFIX-TAG-VERSION>-rhosp13 5d62f572a00c 8 weeks ago 2.24 GB
registry.connect.redhat.com/trilio/trilio-datamover-api <HOTFIX-TAG-VERSION>-rhosp13 5d62f572a00c 8 weeks ago 2.24 GB
registry.connect.redhat.com/trilio/trilio-horizon-plugin <HOTFIX-TAG-VERSION>-rhosp13 27c4de28e5ae 2 months ago 2.27 GB
172.30.5.101:8787/trilio/trilio-horizon-plugin <HOTFIX-TAG-VERSION>-rhosp13 27c4de28e5ae 2 months ago 2.27 GB
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/scripts/
sudo ./prepare_trilio_images.sh <undercloud_ip/hostname> <HOTFIX-TAG-VERSION>-rhosp16.1
## Verify changes
$ grep '<HOTFIX-TAG-VERSION>-rhosp16.1' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
$ openstack tripleo container image list | grep <HOTFIX-TAG-VERSION>-rhosp16.1
| docker://tlsundercloud.ctlplane.trilio.local:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1 |
| docker://tlsundercloud.ctlplane.trilio.local:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1 |
| docker://tlsundercloud.ctlplane.trilio.local:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1 |
-----------------------------------------------------------------------------------------------------
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.2/scripts/
sudo ./prepare_trilio_images.sh <undercloud_ip/hostname> <HOTFIX-TAG-VERSION>-rhosp16.2
## Verify changes
$ grep '<HOTFIX-TAG-VERSION>-rhosp16.2' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
DockerTrilioDmApiImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
ContainerHorizonImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2
$ openstack tripleo container image list | grep <HOTFIX-TAG-VERSION>-rhosp16.2
| docker://tlsundercloud.ctlplane.trilio.local:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2 |
| docker://tlsundercloud.ctlplane.trilio.local:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2 |
| docker://tlsundercloud.ctlplane.trilio.local:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2 |
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp17.0/scripts/
sudo ./prepare_trilio_images.sh <undercloud_ip/hostname> <HOTFIX-TAG-VERSION>-rhosp17.0
## Verify changes
$ grep '<HOTFIX-TAG-VERSION>-rhosp17.0' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp17.0
DockerTrilioDmApiImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp17.0
ContainerHorizonImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp17.0
$ openstack tripleo container image list | grep <HOTFIX-TAG-VERSION>-rhosp17.0
| docker://tlsundercloud.ctlplane.trilio.local:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp17.0 |
| docker://tlsundercloud.ctlplane.trilio.local:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp17.0 |
| docker://tlsundercloud.ctlplane.trilio.local:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp17.0 |
$ grep '<HOTFIX-TAG-VERSION>-rhosp13' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
DockerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13
$ grep '<HOTFIX-TAG-VERSION>-rhosp16.1' trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
$ grep '<HOTFIX-TAG-VERSION>-rhosp16.2' trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
ContainerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2
$ grep '<HOTFIX-TAG-VERSION>-rhosp17.0' trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp17.0
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp17.0
ContainerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp17.0
cd triliovault-cfg-scripts/common/
(undercloud) [stack@ucqa161 ~]$ openstack server list
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
| ID | Name | Status | Networks | Image | Flavor |
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
| 8c3d04ae-fcdd-431c-afa6-9a50f3cb2c0d | overcloudtrain1-controller-2 | ACTIVE | ctlplane=172.30.5.18 | overcloud-full | control |
| 103dfd3e-d073-4123-9223-b8cf8c7398fe | overcloudtrain1-controller-0 | ACTIVE | ctlplane=172.30.5.11 | overcloud-full | control |
| a3541849-2e9b-4aa0-9fa9-91e7d24f0149 | overcloudtrain1-controller-1 | ACTIVE | ctlplane=172.30.5.25 | overcloud-full | control |
| 74a9f530-0c7b-49c4-9a1f-87e7eeda91c0 | overcloudtrain1-novacompute-0 | ACTIVE | ctlplane=172.30.5.30 | overcloud-full | compute |
| c1664ac3-7d9c-4a36-b375-0e4ee19e93e4 | overcloudtrain1-novacompute-1 | ACTIVE | ctlplane=172.30.5.15 | overcloud-full | compute |
+--------------------------------------+-------------------------------+--------+----------------------+----------------+---------+
## On Python3 env
sudo pip3 install PyYAML==5.1
## On Python2 env
sudo pip install PyYAML==5.1
## On Python3 env
python3 ./generate_nfs_map.py
## On Python2 env
python ./generate_nfs_map.py
grep ':.*:' triliovault_nfs_map_output.yml >> ../redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/environments/trilio_nfs_map.yaml
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/environments/trilio_nfs_map.yaml
resource_registry:
OS::TripleO::Services::TrilioDatamover: ../services/trilio-datamover.yaml
OS::TripleO::Services::TrilioDatamoverApi: ../services/trilio-datamover-api.yaml
OS::TripleO::Services::TrilioHorizon: ../services/trilio-horizon.yaml
# NOTE: If there are additional customizations to the endpoint map (e.g. for
# other integrations), this will need to be regenerated.
OS::TripleO::EndpointMap: endpoint_map.yaml
parameter_defaults:
## Enable Trilio's quota functionality on horizon
ExtraConfig:
horizon::customization_module: 'dashboards.overrides'
## Define network map for trilio datamover api service
ServiceNetMap:
TrilioDatamoverApiNetwork: internal_api
## Trilio Datamover Password for keystone and database
TrilioDatamoverPassword: "test1234"
## Trilio container pull urls
DockerTrilioDatamoverImage: devundercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: devundercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
## If you do not want Trilio's horizon plugin to replace your horizon container, just comment out the following line.
ContainerHorizonImage: devundercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
## Backup target type nfs/s3, used to store snapshots taken by triliovault
BackupTargetType: 'nfs'
## If the backup target NFS share supports multiple IPs and you want to use more than one of those IPs, then
## set this parameter to True. Otherwise keep it False.
MultiIPNfsEnabled: False
## For backup target 'nfs'
NfsShares: '192.168.122.101:/opt/tvault'
NfsOptions: 'nolock,soft,timeo=180,intr,lookupcache=none'
## For backup target 's3'
## S3 type: amazon_s3/ceph_s3
S3Type: 'amazon_s3'
## S3 access key
S3AccessKey: ''
## S3 secret key
S3SecretKey: ''
## S3 region, if your s3 does not have any region, just keep the parameter as it is
S3RegionName: ''
## S3 bucket name
S3Bucket: ''
## S3 endpoint url, not required for Amazon S3, keep it as it is
S3EndpointUrl: ''
## S3 signature version
S3SignatureVersion: 'default'
## S3 Auth version
S3AuthVersion: 'DEFAULT'
## If S3 backend is not Amazon S3 and SSL is enabled on S3 endpoint url then change it to 'True', otherwise keep it as 'False'
S3SslEnabled: False
## If S3 backend is not Amazon S3 and SSL is enabled on S3 endpoint URL and SSL certificates are self signed, then
## the user needs to set this parameter value to '/etc/tvault-contego/s3-cert.pem'; otherwise keep its value as an empty string.
S3SslCert: ''
## Configure 'dmapi_workers' parameter of '/etc/dmapi/dmapi.conf' file
## This parameter value is used to spawn the number of dmapi processes to handle the incoming api requests.
## If your dmapi node has 'n' cpu cores, it is recommended to set this parameter to '4*n'.
## If the dmapi_workers field is not present in the config file, the default value equals the number of cores present on the node.
DmApiWorkers: 16
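## Worked example of the rule above (an illustration added here, not part of the original template):
## a dmapi node with 4 CPU cores gives 4 * 4 = 16, the value used in this sample; a 6-core node would use 24.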
## Don't edit following parameter
EnablePackageInstall: True
## Load 'rbd' kernel module on all compute nodes
ComputeParameters:
ExtraKernelModules:
rbd: {}
/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg
listen trilio_datamover_api
bind 172.25.0.107:13784 transparent ssl crt /etc/pki/tls/private/overcloud_endpoint.pem
bind 172.25.0.107:8784 transparent
balance roundrobin
http-request set-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
http-request set-header X-Forwarded-Port %[dst_port]
maxconn 50000
option httpchk
option httplog
retries 5
timeout check 10m
timeout client 10m
timeout connect 10m
timeout http-request 10m
timeout queue 10m
timeout server 10m
server overcloud-controller-0.internalapi.localdomain 172.25.0.106:8784 check fall 5 inter 2000 rise 2
retries 5
timeout http-request 10m
timeout queue 10m
timeout connect 10m
timeout client 10m
timeout server 10m
timeout check 10m
balance roundrobin
maxconn 50000
/home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp13/services/trilio-datamover-api.yaml
/home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/services/trilio-datamover-api.yaml
/home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.2/services/trilio-datamover-api.yaml
/home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp17.0/services/trilio-datamover-api.yaml
tripleo::haproxy::trilio_datamover_api::options:
'retries': '5'
'maxconn': '50000'
'balance': 'roundrobin'
'timeout http-request': '10m'
'timeout queue': '10m'
'timeout connect': '10m'
'timeout client': '10m'
'timeout server': '10m'
'timeout check': '10m'
triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE>/environments/trilio_datamover_opt_volumes.yaml
parameter_defaults:
TrilioDatamoverOptVolumes:
- /opt/dir1:/opt/dir1
- /mnt/dir2:/var/dir2
openstack overcloud deploy --templates \
-e <> \
.
.
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/environments/trilio_datamover_opt_volumes.yaml
openstack overcloud deploy --templates \
-e /home/stack/templates/node-info.yaml \
-e /home/stack/templates/overcloud_images.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/environments/trilio_env.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/enable-tls.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/inject-trust-anchor.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/environments/trilio_env_tls_endpoints_public_dns.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/environments/trilio_nfs_map.yaml \
--ntp-server 192.168.1.34 \
--libvirt-type qemu \
--log-file overcloud_deploy.log \
-r /usr/share/openstack-tripleo-heat-templates/roles_data.yaml
[root@overcloud-controller-0 heat-admin]# podman ps | grep trilio
26fcb9194566 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2 kolla_start 5 days ago Up 5 days ago trilio_dmapi
094971d0f5a9 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2 kolla_start 5 days ago Up 5 days ago horizon
/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg
[root@overcloud-novacompute-0 heat-admin]# podman ps | grep trilio
b1840444cc59 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2 kolla_start 5 days ago Up 5 days ago trilio_datamover
[root@overcloud-controller-0 heat-admin]# podman ps | grep horizon
094971d0f5a9 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2 kolla_start 5 days ago Up 5 days ago horizon
## Either of the below workarounds should be performed on all controller nodes where the issue occurs for the horizon pod.
option-1: Restart the memcached service on the controller using systemctl (command: systemctl restart tripleo_memcached.service)
option-2: Restart the memcached pod (command: podman restart memcached)
heat stack-list --show-nested -f "status=FAILED"
heat resource-list --nested-depth 5 overcloud | grep FAILED
=> If the trilio datamover api containers do not start well or are in a restarting state, use the following logs to debug.
docker logs trilio_dmapi
tailf /var/log/containers/trilio-datamover-api/dmapi.log
=> If the trilio datamover containers do not start well or are in a restarting state, use the following logs to debug.
docker logs trilio_datamover
tailf /var/log/containers/trilio-datamover/tvault-contego.log
tls-everywhere-endpoints-dns.yaml file, use environments/trilio_env_tls_everywhere_dns.yaml
# mount <NFS B2-IP/NFS B2-FQDN>:/<VOL-Path> /mnt
workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105
/…/workload_<id>/workload_db <<< Contains User ID and Project ID of Workload owner
/…/workload_<id>/workload_vms_db <<< Contains VM IDs and VM Names of all VMs actively protected by the Workload
# cp /mnt/workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105 /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW0=/workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105
# chown -R nova:nova /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW0=/workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105
# chmod -R 644 /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW0=/workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105
#qemu-img info bd57ec9b-c4ac-4a37-a4fd-5c9aa002c778
image: bd57ec9b-c4ac-4a37-a4fd-5c9aa002c778
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 516K
cluster_size: 65536
backing file: /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW0=/workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105/snapshot_1415095d-c047-400b-8b05-c88e57011263/vm_id_38b620f1-24ae-41d7-b0ab-85ffc2d7958b/vm_res_id_d4ab3431-5ce3-4a8f-a90b-07606e2ffa33_vda/7c39eb6a-6e42-418e-8690-b6368ecaa7bb
Format specific information:
compat: 1.1
lazy refcounts: false
refcount bits: 16
corrupt: false
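The directory name under /var/triliovault-mounts in the backing file path above is simply the base64 encoding of the NFS export string; the echo commands below show the same derivation for the old and new shares. As a convenience sketch (not part of the product tooling), the encoding can also be reversed with base64 -d to check which export an existing mount directory belongs to, using the 10.10.2.20:/upstream share from the snapshot metadata earlier as an example:
# echo -n 10.10.2.20:/upstream | base64
MTAuMTAuMi4yMDovdXBzdHJlYW0=
# echo -n MTAuMTAuMi4yMDovdXBzdHJlYW0= | base64 -d
10.10.2.20:/upstream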
# echo -n 10.10.2.20:/NFS_A1 | base64
MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl
# echo -n 10.20.3.22:/NFS_B2 | base64
MTAuMjAuMy4yMjovdXBzdHJlYW1fdGFyZ2V0
#mkdir /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl
#mount --bind /var/triliovault-mounts/MTAuMjAuMy4yMjovdXBzdHJlYW1fdGFyZ2V0/ /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl
#vi /etc/fstab
/var/triliovault-mounts/MTAuMjAuMy4yMjovdXBzdHJlYW1fdGFyZ2V0/ /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl none bind 0 0
# source {customer admin rc file}
# openstack role add Admin --user <my_admin_user> --user-domain <admin_domain> --domain <target_domain>
# openstack role add Admin --user <my_admin_user> --user-domain <admin_domain> --project <target_project> --project-domain <target_domain>
# openstack role add <Backup Trustee Role> --user <my_admin_user> --user-domain <admin_domain> --project <destination_project> --project-domain <target_domain>
# workloadmgr workload-get-orphaned-workloads-list --migrate_cloud True
+------------+--------------------------------------+----------------------------------+----------------------------------+
| Name | ID | Project ID | User ID |
+------------+--------------------------------------+----------------------------------+----------------------------------+
| Workload_1 | 6639525d-736a-40c5-8133-5caaddaaa8e9 | 4224d3acfd394cc08228cc8072861a35 | 329880dedb4cd357579a3279835f392 |
| Workload_2 | 904e72f7-27bb-4235-9b31-13a636eb9c95 | 637a9ce3fd0d404cabf1a776696c9c04 | 329880dedb4cd357579a3279835f392 |
+------------+--------------------------------------+----------------------------------+----------------------------------+
# openstack project list --domain <target_domain>
+----------------------------------+----------+
| ID | Name |
+----------------------------------+----------+
| 01fca51462a44bfa821130dce9baac1a | project1 |
| 33b4db1099ff4a65a4c1f69a14f932ee | project2 |
| 9139e694eb984a4a979b5ae8feb955af | project3 |
+----------------------------------+----------+
# openstack role assignment list --project <target_project> --project-domain <target_domain> --role <backup_trustee_role>
+----------------------------------+----------------------------------+-------+----------------------------------+--------+-----------+
| Role | User | Group | Project | Domain | Inherited |
+----------------------------------+----------------------------------+-------+----------------------------------+--------+-----------+
| 9fe2ff9ee4384b1894a90878d3e92bab | 72e65c264a694272928f5d84b73fe9ce | | 8e16700ae3614da4ba80a4e57d60cdb9 | | False |
| 9fe2ff9ee4384b1894a90878d3e92bab | d5fbd79f4e834f51bfec08be6d3b2ff2 | | 8e16700ae3614da4ba80a4e57d60cdb9 | | False |
| 9fe2ff9ee4384b1894a90878d3e92bab | f5b1d071816742fba6287d2c8ffcd6c4 | | 8e16700ae3614da4ba80a4e57d60cdb9 | | False |
+----------------------------------+----------------------------------+-------+----------------------------------+--------+-----------+
# workloadmgr workload-reassign-workloads --new_tenant_id {target_project_id} --user_id {target_user_id} --workload_ids {workload_id} --migrate_cloud True
+-----------+--------------------------------------+----------------------------------+----------------------------------+
| Name | ID | Project ID | User ID |
+-----------+--------------------------------------+----------------------------------+----------------------------------+
| project1 | 904e72f7-27bb-4235-9b31-13a636eb9c95 | 4f2a91274ce9491481db795dcb10b04f | 3e05cac47338425d827193ba374749cc |
+-----------+--------------------------------------+----------------------------------+----------------------------------+
# workloadmgr workload-show ac9cae9b-5e1b-4899-930c-6aa0600a2105
+-------------------+------------------------------------------------------------------------------------------------------+
| Property | Value |
+-------------------+------------------------------------------------------------------------------------------------------+
| availability_zone | nova |
| created_at | 2019-04-18T02:19:39.000000 |
| description | Test Linux VMs |
| error_msg | None |
| id | ac9cae9b-5e1b-4899-930c-6aa0600a2105 |
| instances | [{"id": "38b620f1-24ae-41d7-b0ab-85ffc2d7958b", "name": "Test-Linux-1"}, {"id": |
| | "3fd869b2-16bd-4423-b389-18d19d37c8e0", "name": "Test-Linux-2"}] |
| interval | None |
| jobschedule | True |
| name | Test Linux |
| project_id | 2fc4e2180c2745629753305591aeb93b |
| scheduler_trust | None |
| status | available |
| storage_usage | {"usage": 60555264, "full": {"usage": 44695552, "snap_count": 1}, "incremental": {"usage": 15859712, |
| | "snap_count": 13}} |
| updated_at | 2019-11-15T02:32:43.000000 |
| user_id | 72e65c264a694272928f5d84b73fe9ce |
| workload_type_id | f82ce76f-17fe-438b-aa37-7a023058e50d |
+-------------------+------------------------------------------------------------------------------------------------------+
# workloadmgr snapshot-list --workload_id ac9cae9b-5e1b-4899-930c-6aa0600a2105 --all True
+----------------------------+--------------+--------------------------------------+--------------------------------------+---------------+-----------+-----------+
| Created At | Name | ID | Workload ID | Snapshot Type | Status | Host |
+----------------------------+--------------+--------------------------------------+--------------------------------------+---------------+-----------+-----------+
| 2019-11-02T02:30:02.000000 | jobscheduler | f5b8c3fd-c289-487d-9d50-fe27a6561d78 | ac9cae9b-5e1b-4899-930c-6aa0600a2105 | full | available | Upstream2 |
| 2019-11-03T02:30:02.000000 | jobscheduler | 7e39e544-537d-4417-853d-11463e7396f9 | ac9cae9b-5e1b-4899-930c-6aa0600a2105 | incremental | available | Upstream2 |
| 2019-11-04T02:30:02.000000 | jobscheduler | 0c086f3f-fa5d-425f-b07e-a1adcdcafea9 | ac9cae9b-5e1b-4899-930c-6aa0600a2105 | incremental | available | Upstream2 |
+----------------------------+--------------+--------------------------------------+--------------------------------------+---------------+-----------+-----------+
# workloadmgr snapshot-show --output networks 7e39e544-537d-4417-853d-11463e7396f9
+-------------------+--------------------------------------+
| Snapshot property | Value |
+-------------------+--------------------------------------+
| description | None |
| host | Upstream2 |
| id | 7e39e544-537d-4417-853d-11463e7396f9 |
| name | jobscheduler |
| progress_percent | 100 |
| restore_size | 44040192 Bytes or Approx (42.0MB) |
| restores_info | |
| size | 1310720 Bytes or Approx (1.2MB) |
| snapshot_type | incremental |
| status | available |
| time_taken | 154 Seconds |
| uploaded_size | 1310720 |
| workload_id | ac9cae9b-5e1b-4899-930c-6aa0600a2105 |
+-------------------+--------------------------------------+
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Instances | Value |
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-1 |
| ID | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| | |
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-2 |
| ID | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| | |
+----------------+---------------------------------------------------------------------------------------------------------------------+
+-------------+----------------------------------------------------------------------------------------------------------------------------------------------+
| Networks | Value |
+-------------+----------------------------------------------------------------------------------------------------------------------------------------------+
| ip_address | 172.20.20.20 |
| vm_id | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| network | {u'subnet': {u'ip_version': 4, u'cidr': u'172.20.20.0/24', u'gateway_ip': u'172.20.20.1', u'id': u'3a756a89-d979-4cda-a7f3-dacad8594e44',
u'name': u'Trilio Test'}, u'cidr': None, u'id': u'5f0e5d34-569d-42c9-97c2-df944f3924b1', u'name': u'Trilio_Test_Internal', u'network_type': u'neutron'} |
| mac_address | fa:16:3e:74:58:bb |
| | |
| ip_address | 172.20.20.13 |
| vm_id | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| network | {u'subnet': {u'ip_version': 4, u'cidr': u'172.20.20.0/24', u'gateway_ip': u'172.20.20.1', u'id': u'3a756a89-d979-4cda-a7f3-dacad8594e44',
u'name': u'Trilio Test'}, u'cidr': None, u'id': u'5f0e5d34-569d-42c9-97c2-df944f3924b1', u'name': u'Trilio_Test_Internal', u'network_type': u'neutron'} |
| mac_address | fa:16:3e:6b:46:ae |
+-------------+----------------------------------------------------------------------------------------------------------------------------------------------+
[root@upstreamcontroller ~(keystone_admin)]# workloadmgr snapshot-show --output disks 7e39e544-537d-4417-853d-11463e7396f9
+-------------------+--------------------------------------+
| Snapshot property | Value |
+-------------------+--------------------------------------+
| description | None |
| host | Upstream2 |
| id | 7e39e544-537d-4417-853d-11463e7396f9 |
| name | jobscheduler |
| progress_percent | 100 |
| restore_size | 44040192 Bytes or Approx (42.0MB) |
| restores_info | |
| size | 1310720 Bytes or Approx (1.2MB) |
| snapshot_type | incremental |
| status | available |
| time_taken | 154 Seconds |
| uploaded_size | 1310720 |
| workload_id | ac9cae9b-5e1b-4899-930c-6aa0600a2105 |
+-------------------+--------------------------------------+
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Instances | Value |
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-1 |
| ID | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| | |
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-2 |
| ID | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| | |
+----------------+---------------------------------------------------------------------------------------------------------------------+
+-------------------+--------------------------------------------------+
| Vdisks | Value |
+-------------------+--------------------------------------------------+
| volume_mountpoint | /dev/vda |
| restore_size | 22020096 |
| resource_id | ebc2fdd0-3c4d-4548-b92d-0e16734b5d9a |
| volume_name | 0027b140-a427-46cb-9ccf-7895c7624493 |
| volume_type | None |
| label | None |
| volume_size | 1 |
| volume_id | 0027b140-a427-46cb-9ccf-7895c7624493 |
| availability_zone | nova |
| vm_id | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| metadata | {u'readonly': u'False', u'attached_mode': u'rw'} |
| | |
| volume_mountpoint | /dev/vda |
| restore_size | 22020096 |
| resource_id | 8007ed89-6a86-447e-badb-e49f1e92f57a |
| volume_name | 2a7f9e78-7778-4452-af5b-8e2fa43853bd |
| volume_type | None |
| label | None |
| volume_size | 1 |
| volume_id | 2a7f9e78-7778-4452-af5b-8e2fa43853bd |
| availability_zone | nova |
| vm_id | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| metadata | {u'readonly': u'False', u'attached_mode': u'rw'} |
| | |
+-------------------+--------------------------------------------------+
{
u'description':u'<description of the restore>',
u'oneclickrestore':False,
u'restore_type':u'selective',
u'type':u'openstack',
u'name':u'<name of the restore>',
u'openstack':{
u'instances':[
{
u'name':u'<name instance 1>',
u'availability_zone':u'<AZ instance 1>',
u'nics':[ #####Leave empty for network topology restore
],
u'vdisks':[
{
u'id':u'<old disk id>',
u'new_volume_type':u'<new volume type name>',
u'availability_zone':u'<new cinder volume AZ>'
}
],
u'flavor':{
u'ram':<RAM in MB>,
u'ephemeral':<GB of ephemeral disk>,
u'vcpus':<# vCPUs>,
u'swap':u'<GB of Swap disk>',
u'disk':<GB of boot disk>,
u'id':u'<id of the flavor to use>'
},
u'include':<True/False>,
u'id':u'<old id of the instance>'
} #####Repeat for each instance in the snapshot
],
u'restore_topology':<True/False>,
u'networks_mapping':{
u'networks':[ #####Leave empty for network topology restore
]
}
}
}
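The template above uses Python-style literals (u'...' strings, True/False) rather than strict JSON. As a convenience sketch under that assumption (not a product requirement), a filled-in restore.json can be syntax-checked before it is passed to the CLI:
# python -c "import ast; ast.literal_eval(open('restore.json').read()); print('restore.json parses cleanly')"
If the file still contains unfilled <placeholders> or a missing comma, the command raises an error instead of printing the success message.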
# workloadmgr snapshot-selective-restore --filename restore.json {snapshot id}
[root@upstreamcontroller ~(keystone_admin)]# workloadmgr restore-list --snapshot_id 5928554d-a882-4881-9a5c-90e834c071af
+----------------------------+------------------+--------------------------------------+--------------------------------------+----------+-----------+
| Created At | Name | ID | Snapshot ID | Size | Status |
+----------------------------+------------------+--------------------------------------+--------------------------------------+----------+-----------+
| 2019-09-24T12:44:38.000000 | OneClick Restore | 5b4216d0-4bed-460f-8501-1589e7b45e01 | 5928554d-a882-4881-9a5c-90e834c071af | 41126400 | available |
+----------------------------+------------------+--------------------------------------+--------------------------------------+----------+-----------+
[root@upstreamcontroller ~(keystone_admin)]# workloadmgr restore-show 5b4216d0-4bed-460f-8501-1589e7b45e01
+------------------+------------------------------------------------------------------------------------------------------+
| Property | Value |
+------------------+------------------------------------------------------------------------------------------------------+
| created_at | 2019-09-24T12:44:38.000000 |
| description | - |
| error_msg | None |
| finished_at | 2019-09-24T12:46:07.000000 |
| host | Upstream2 |
| id | 5b4216d0-4bed-460f-8501-1589e7b45e01 |
| instances | [{"status": "available", "id": "b8506f04-1b99-4ca8-839b-6f5d2c20d9aa", "name": "temp", "metadata": |
| | {"instance_id": "c014a938-903d-43db-bfbb-ea4998ff1a0f", "production": "1", "config_drive": ""}}] |
| name | OneClick Restore |
| progress_msg | Restore from snapshot is complete |
| progress_percent | 100 |
| project_id | 8e16700ae3614da4ba80a4e57d60cdb9 |
| restore_options | {"description": "-", "oneclickrestore": true, "restore_type": "oneclick", "openstack": {"instances": |
| | [{"availability_zone": "US-West", "id": "c014a938-903d-43db-bfbb-ea4998ff1a0f", "name": "temp"}]}, |
| | "type": "openstack", "name": "OneClick Restore"} |
| restore_type | restore |
| size | 41126400 |
| snapshot_id | 5928554d-a882-4881-9a5c-90e834c071af |
| status | available |
| time_taken | 89 |
| updated_at | 2019-09-24T12:44:38.000000 |
| uploaded_size | 41126400 |
| user_id | d5fbd79f4e834f51bfec08be6d3b2ff2 |
| warning_msg | None |
| workload_id | 02b1aca2-c51a-454b-8c0f-99966314165e |
+------------------+------------------------------------------------------------------------------------------------------+
# workloadmgr workload-delete <workload_id>
# source {customer admin rc file}
# openstack role remove Admin --user <my_admin_user> --user-domain <admin_domain> --domain <target_domain>
# openstack role remove Admin --user <my_admin_user> --user-domain <admin_domain> --project <target_project> --project-domain <target_domain>
# openstack role remove <Backup Trustee Role> --user <my_admin_user> --user-domain <admin_domain> --project <destination_project> --project-domain <target_domain>
# vi /etc/workloadmgr/workloadmgr.conf
vault_storage_nfs_export = <NFS_B1/NFS_B1-FQDN>:/<VOL-B1-Path>
vault_storage_nfs_export = <NFS-IP/NFS-FQDN>:/<VOL-1-Path>,<NFS-IP/NFS-FQDN>:/<VOL-2-Path>
# systemctl restart wlm-workloads
# vi /etc/tvault-contego/tvault-contego.conf
vault_storage_nfs_export = <NFS_B1-IP/NFS_B1-FQDN>:/<VOL-B1-Path>
vault_storage_nfs_export = <NFS_B1-IP/NFS-FQDN>:/<VOL-B1-Path>,<NFS_B2-IP/NFS-FQDN>:/<VOL-B2-Path>
# systemctl restart tvault-contego
#qemu-img info bd57ec9b-c4ac-4a37-a4fd-5c9aa002c778
image: bd57ec9b-c4ac-4a37-a4fd-5c9aa002c778
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 516K
cluster_size: 65536
backing file: /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW0=/workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105/snapshot_1415095d-c047-400b-8b05-c88e57011263/vm_id_38b620f1-24ae-41d7-b0ab-85ffc2d7958b/vm_res_id_d4ab3431-5ce3-4a8f-a90b-07606e2ffa33_vda/7c39eb6a-6e42-418e-8690-b6368ecaa7bb
Format specific information:
compat: 1.1
lazy refcounts: false
refcount bits: 16
corrupt: false
# echo -n 10.10.2.20:/NFS_A1 | base64
MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl
# echo -n 10.20.3.22:/NFS_B2 | base64
MTAuMjAuMy4yMjovdXBzdHJlYW1fdGFyZ2V0
#mkdir /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl
#mount --bind /var/triliovault-mounts/MTAuMjAuMy4yMjovdXBzdHJlYW1fdGFyZ2V0/ /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl
#vi /etc/fstab
/var/triliovault-mounts/MTAuMjAuMy4yMjovdXBzdHJlYW1fdGFyZ2V0/ /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl none bind 0 0
# source {customer admin rc file}
# openstack role add Admin --user <my_admin_user> --user-domain <admin_domain> --domain <target_domain>
# openstack role add Admin --user <my_admin_user> --user-domain <admin_domain> --project <target_project> --project-domain <target_domain>
# openstack role add <Backup Trustee Role> --user <my_admin_user> --user-domain <admin_domain> --project <destination_project> --project-domain <target_domain>
# workloadmgr workload-get-orphaned-workloads-list --migrate_cloud True
+------------+--------------------------------------+----------------------------------+----------------------------------+
| Name | ID | Project ID | User ID |
+------------+--------------------------------------+----------------------------------+----------------------------------+
| Workload_1 | 6639525d-736a-40c5-8133-5caaddaaa8e9 | 4224d3acfd394cc08228cc8072861a35 | 329880dedb4cd357579a3279835f392 |
| Workload_2 | 904e72f7-27bb-4235-9b31-13a636eb9c95 | 637a9ce3fd0d404cabf1a776696c9c04 | 329880dedb4cd357579a3279835f392 |
+------------+--------------------------------------+----------------------------------+----------------------------------+
# openstack project list --domain <target_domain>
+----------------------------------+----------+
| ID | Name |
+----------------------------------+----------+
| 01fca51462a44bfa821130dce9baac1a | project1 |
| 33b4db1099ff4a65a4c1f69a14f932ee | project2 |
| 9139e694eb984a4a979b5ae8feb955af | project3 |
+----------------------------------+----------+
# openstack role assignment list --project <target_project> --project-domain <target_domain> --role <backup_trustee_role>
+----------------------------------+----------------------------------+-------+----------------------------------+--------+-----------+
| Role | User | Group | Project | Domain | Inherited |
+----------------------------------+----------------------------------+-------+----------------------------------+--------+-----------+
| 9fe2ff9ee4384b1894a90878d3e92bab | 72e65c264a694272928f5d84b73fe9ce | | 8e16700ae3614da4ba80a4e57d60cdb9 | | False |
| 9fe2ff9ee4384b1894a90878d3e92bab | d5fbd79f4e834f51bfec08be6d3b2ff2 | | 8e16700ae3614da4ba80a4e57d60cdb9 | | False |
| 9fe2ff9ee4384b1894a90878d3e92bab | f5b1d071816742fba6287d2c8ffcd6c4 | | 8e16700ae3614da4ba80a4e57d60cdb9 | | False |
+----------------------------------+----------------------------------+-------+----------------------------------+--------+-----------+
# workloadmgr workload-reassign-workloads --new_tenant_id {target_project_id} --user_id {target_user_id} --workload_ids {workload_id} --migrate_cloud True
+-----------+--------------------------------------+----------------------------------+----------------------------------+
| Name | ID | Project ID | User ID |
+-----------+--------------------------------------+----------------------------------+----------------------------------+
| project1 | 904e72f7-27bb-4235-9b31-13a636eb9c95 | 4f2a91274ce9491481db795dcb10b04f | 3e05cac47338425d827193ba374749cc |
+-----------+--------------------------------------+----------------------------------+----------------------------------+
# workloadmgr workload-show ac9cae9b-5e1b-4899-930c-6aa0600a2105
+-------------------+------------------------------------------------------------------------------------------------------+
| Property | Value |
+-------------------+------------------------------------------------------------------------------------------------------+
| availability_zone | nova |
| created_at | 2019-04-18T02:19:39.000000 |
| description | Test Linux VMs |
| error_msg | None |
| id | ac9cae9b-5e1b-4899-930c-6aa0600a2105 |
| instances | [{"id": "38b620f1-24ae-41d7-b0ab-85ffc2d7958b", "name": "Test-Linux-1"}, {"id": |
| | "3fd869b2-16bd-4423-b389-18d19d37c8e0", "name": "Test-Linux-2"}] |
| interval | None |
| jobschedule | True |
| name | Test Linux |
| project_id | 2fc4e2180c2745629753305591aeb93b |
| scheduler_trust | None |
| status | available |
| storage_usage | {"usage": 60555264, "full": {"usage": 44695552, "snap_count": 1}, "incremental": {"usage": 15859712, |
| | "snap_count": 13}} |
| updated_at | 2019-11-15T02:32:43.000000 |
| user_id | 72e65c264a694272928f5d84b73fe9ce |
| workload_type_id | f82ce76f-17fe-438b-aa37-7a023058e50d |
+-------------------+------------------------------------------------------------------------------------------------------+
# workloadmgr snapshot-list --workload_id ac9cae9b-5e1b-4899-930c-6aa0600a2105 --all True
+----------------------------+--------------+--------------------------------------+--------------------------------------+---------------+-----------+-----------+
| Created At | Name | ID | Workload ID | Snapshot Type | Status | Host |
+----------------------------+--------------+--------------------------------------+--------------------------------------+---------------+-----------+-----------+
| 2019-11-02T02:30:02.000000 | jobscheduler | f5b8c3fd-c289-487d-9d50-fe27a6561d78 | ac9cae9b-5e1b-4899-930c-6aa0600a2105 | full | available | Upstream2 |
| 2019-11-03T02:30:02.000000 | jobscheduler | 7e39e544-537d-4417-853d-11463e7396f9 | ac9cae9b-5e1b-4899-930c-6aa0600a2105 | incremental | available | Upstream2 |
| 2019-11-04T02:30:02.000000 | jobscheduler | 0c086f3f-fa5d-425f-b07e-a1adcdcafea9 | ac9cae9b-5e1b-4899-930c-6aa0600a2105 | incremental | available | Upstream2 |
+----------------------------+--------------+--------------------------------------+--------------------------------------+---------------+-----------+-----------+
# workloadmgr snapshot-show --output networks 7e39e544-537d-4417-853d-11463e7396f9
+-------------------+--------------------------------------+
| Snapshot property | Value |
+-------------------+--------------------------------------+
| description | None |
| host | Upstream2 |
| id | 7e39e544-537d-4417-853d-11463e7396f9 |
| name | jobscheduler |
| progress_percent | 100 |
| restore_size | 44040192 Bytes or Approx (42.0MB) |
| restores_info | |
| size | 1310720 Bytes or Approx (1.2MB) |
| snapshot_type | incremental |
| status | available |
| time_taken | 154 Seconds |
| uploaded_size | 1310720 |
| workload_id | ac9cae9b-5e1b-4899-930c-6aa0600a2105 |
+-------------------+--------------------------------------+
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Instances | Value |
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-1 |
| ID | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| | |
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-2 |
| ID | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| | |
+----------------+---------------------------------------------------------------------------------------------------------------------+
+-------------+----------------------------------------------------------------------------------------------------------------------------------------------+
| Networks | Value |
+-------------+----------------------------------------------------------------------------------------------------------------------------------------------+
| ip_address | 172.20.20.20 |
| vm_id | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| network     | {u'subnet': {u'ip_version': 4, u'cidr': u'172.20.20.0/24', u'gateway_ip': u'172.20.20.1', u'id': u'3a756a89-d979-4cda-a7f3-dacad8594e44', u'name': u'Trilio Test'}, u'cidr': None, u'id': u'5f0e5d34-569d-42c9-97c2-df944f3924b1', u'name': u'Trilio_Test_Internal', u'network_type': u'neutron'} |
| mac_address | fa:16:3e:74:58:bb |
| | |
| ip_address | 172.20.20.13 |
| vm_id | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| network     | {u'subnet': {u'ip_version': 4, u'cidr': u'172.20.20.0/24', u'gateway_ip': u'172.20.20.1', u'id': u'3a756a89-d979-4cda-a7f3-dacad8594e44', u'name': u'Trilio Test'}, u'cidr': None, u'id': u'5f0e5d34-569d-42c9-97c2-df944f3924b1', u'name': u'Trilio_Test_Internal', u'network_type': u'neutron'} |
| mac_address | fa:16:3e:6b:46:ae |
+-------------+----------------------------------------------------------------------------------------------------------------------------------------------+
[root@upstreamcontroller ~(keystone_admin)]# workloadmgr snapshot-show --output disks 7e39e544-537d-4417-853d-11463e7396f9
+-------------------+--------------------------------------+
| Snapshot property | Value |
+-------------------+--------------------------------------+
| description | None |
| host | Upstream2 |
| id | 7e39e544-537d-4417-853d-11463e7396f9 |
| name | jobscheduler |
| progress_percent | 100 |
| restore_size | 44040192 Bytes or Approx (42.0MB) |
| restores_info | |
| size | 1310720 Bytes or Approx (1.2MB) |
| snapshot_type | incremental |
| status | available |
| time_taken | 154 Seconds |
| uploaded_size | 1310720 |
| workload_id | ac9cae9b-5e1b-4899-930c-6aa0600a2105 |
+-------------------+--------------------------------------+
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Instances | Value |
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-1 |
| ID | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| | |
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-2 |
| ID | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| | |
+----------------+---------------------------------------------------------------------------------------------------------------------+
+-------------------+--------------------------------------------------+
| Vdisks | Value |
+-------------------+--------------------------------------------------+
| volume_mountpoint | /dev/vda |
| restore_size | 22020096 |
| resource_id | ebc2fdd0-3c4d-4548-b92d-0e16734b5d9a |
| volume_name | 0027b140-a427-46cb-9ccf-7895c7624493 |
| volume_type | None |
| label | None |
| volume_size | 1 |
| volume_id | 0027b140-a427-46cb-9ccf-7895c7624493 |
| availability_zone | nova |
| vm_id | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| metadata | {u'readonly': u'False', u'attached_mode': u'rw'} |
| | |
| volume_mountpoint | /dev/vda |
| restore_size | 22020096 |
| resource_id | 8007ed89-6a86-447e-badb-e49f1e92f57a |
| volume_name | 2a7f9e78-7778-4452-af5b-8e2fa43853bd |
| volume_type | None |
| label | None |
| volume_size | 1 |
| volume_id | 2a7f9e78-7778-4452-af5b-8e2fa43853bd |
| availability_zone | nova |
| vm_id | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| metadata | {u'readonly': u'False', u'attached_mode': u'rw'} |
| | |
+-------------------+--------------------------------------------------+
{
u'description':u'<description of the restore>',
u'oneclickrestore':False,
u'restore_type':u'selective',
u'type':u'openstack',
u'name':u'<name of the restore>',
u'openstack':{
u'instances':[
{
u'name':u'<name instance 1>',
u'availability_zone':u'<AZ instance 1>',
u'nics':[ #####Leave empty for network topology restore
],
u'vdisks':[
{
u'id':u'<old disk id>',
u'new_volume_type':u'<new volume type name>',
u'availability_zone':u'<new cinder volume AZ>'
}
],
u'flavor':{
u'ram':<RAM in MB>,
u'ephemeral':<GB of ephemeral disk>,
u'vcpus':<# vCPUs>,
u'swap':u'<GB of Swap disk>',
u'disk':<GB of boot disk>,
u'id':u'<id of the flavor to use>'
},
u'include':<True/False>,
u'id':u'<old id of the instance>'
} #####Repeat for each instance in the snapshot
],
u'restore_topology':<True/False>,
u'networks_mapping':{
u'networks':[ #####Leave empty for network topology restore
]
}
}
}
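
For illustration only, the following is how restore.json might look when filled in with the instance, volume, and network IDs from the example snapshot shown above. The flavor id '1' and the new volume type 'iscsi' are assumed values and not taken from the environment above; adjust them to match the target cloud.

{
   u'description':u'Selective restore of Test-Linux-1',
   u'oneclickrestore':False,
   u'restore_type':u'selective',
   u'type':u'openstack',
   u'name':u'Selective Restore Example',
   u'openstack':{
      u'instances':[
         {
            u'name':u'Test-Linux-1-restored',
            u'availability_zone':u'nova',
            u'nics':[
            ],
            u'vdisks':[
               {
                  u'id':u'0027b140-a427-46cb-9ccf-7895c7624493',
                  u'new_volume_type':u'iscsi',
                  u'availability_zone':u'nova'
               }
            ],
            u'flavor':{
               u'ram':512,
               u'ephemeral':0,
               u'vcpus':1,
               u'swap':u'',
               u'disk':1,
               u'id':u'1'
            },
            u'include':True,
            u'id':u'38b620f1-24ae-41d7-b0ab-85ffc2d7958b'
         }
      ],
      u'restore_topology':False,
      u'networks_mapping':{
         u'networks':[
            {
               u'snapshot_network':{
                  u'subnet':{u'id':u'3a756a89-d979-4cda-a7f3-dacad8594e44'},
                  u'id':u'5f0e5d34-569d-42c9-97c2-df944f3924b1'
               },
               u'target_network':{
                  u'subnet':{u'id':u'3a756a89-d979-4cda-a7f3-dacad8594e44'},
                  u'id':u'5f0e5d34-569d-42c9-97c2-df944f3924b1',
                  u'name':u'Trilio_Test_Internal'
               }
            }
         ]
      }
   }
}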
# workloadmgr snapshot-selective-restore --filename restore.json {snapshot id}
[root@upstreamcontroller ~(keystone_admin)]# workloadmgr restore-list --snapshot_id 5928554d-a882-4881-9a5c-90e834c071af
+----------------------------+------------------+--------------------------------------+--------------------------------------+----------+-----------+
| Created At | Name | ID | Snapshot ID | Size | Status |
+----------------------------+------------------+--------------------------------------+--------------------------------------+----------+-----------+
| 2019-09-24T12:44:38.000000 | OneClick Restore | 5b4216d0-4bed-460f-8501-1589e7b45e01 | 5928554d-a882-4881-9a5c-90e834c071af | 41126400 | available |
+----------------------------+------------------+--------------------------------------+--------------------------------------+----------+-----------+
[root@upstreamcontroller ~(keystone_admin)]# workloadmgr restore-show 5b4216d0-4bed-460f-8501-1589e7b45e01
+------------------+------------------------------------------------------------------------------------------------------+
| Property | Value |
+------------------+------------------------------------------------------------------------------------------------------+
| created_at | 2019-09-24T12:44:38.000000 |
| description | - |
| error_msg | None |
| finished_at | 2019-09-24T12:46:07.000000 |
| host | Upstream2 |
| id | 5b4216d0-4bed-460f-8501-1589e7b45e01 |
| instances | [{"status": "available", "id": "b8506f04-1b99-4ca8-839b-6f5d2c20d9aa", "name": "temp", "metadata": |
| | {"instance_id": "c014a938-903d-43db-bfbb-ea4998ff1a0f", "production": "1", "config_drive": ""}}] |
| name | OneClick Restore |
| progress_msg | Restore from snapshot is complete |
| progress_percent | 100 |
| project_id | 8e16700ae3614da4ba80a4e57d60cdb9 |
| restore_options | {"description": "-", "oneclickrestore": true, "restore_type": "oneclick", "openstack": {"instances": |
| | [{"availability_zone": "US-West", "id": "c014a938-903d-43db-bfbb-ea4998ff1a0f", "name": "temp"}]}, |
| | "type": "openstack", "name": "OneClick Restore"} |
| restore_type | restore |
| size | 41126400 |
| snapshot_id | 5928554d-a882-4881-9a5c-90e834c071af |
| status | available |
| time_taken | 89 |
| updated_at | 2019-09-24T12:44:38.000000 |
| uploaded_size | 41126400 |
| user_id | d5fbd79f4e834f51bfec08be6d3b2ff2 |
| warning_msg | None |
| workload_id | 02b1aca2-c51a-454b-8c0f-99966314165e |
+------------------+------------------------------------------------------------------------------------------------------+
# vi /etc/workloadmgr/workloadmgr.conf
vault_storage_nfs_export = <NFS_B1-IP/NFS-FQDN>:/<VOL-B1-Path>,<NFS_B2-IP/NFS-FQDN>:/<VOL-B2-Path>
vault_storage_nfs_export = <NFS_B1-IP/NFS_B1-FQDN>:/<VOL-B1-Path>
# systemctl restart wlm-workloads
# vi /etc/tvault-contego/tvault-contego.conf
vault_storage_nfs_export = <NFS_B1-IP/NFS-FQDN>:/<VOL-B1-Path>,<NFS_B2-IP/NFS-FQDN>:/<VOL-B2-Path>
vault_storage_nfs_export = <NFS-IP/NFS-FQDN>:/<VOL-1-Path>
# systemctl restart tvault-contego
# source {customer admin rc file}
# openstack role remove Admin --user <my_admin_user> --user-domain <admin_domain> --domain <target_domain>
# openstack role remove Admin --user <my_admin_user> --user-domain <admin_domain> --project <target_project> --project-domain <target_domain>
# openstack role remove <Backup Trustee Role> --user <my_admin_user> --user-domain <admin_domain> --project <destination_project> --project-domain <target_domain>
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 05 Nov 2020 11:28:43 GMT
Content-Type: application/json
Content-Length: 4308
Connection: keep-alive
X-Compute-Request-Id: req-0bc531b6-be6e-43b4-90bd-39ef26ef1463
{
"restores":[
{
"id":"29fdc1f8-1d53-4a10-bb45-e539a64cdbfc",
"created_at":"2020-11-05T10:17:40.000000",
"updated_at":"2020-11-05T10:17:40.000000",
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 05 Nov 2020 14:04:45 GMT
Content-Type: application/json
Content-Length: 2639
Connection: keep-alive
X-Compute-Request-Id: req-30640219-e94e-4651-9b9e-49f5574e2a7f
{
"restore":{
"id":"29fdc1f8-1d53-4a10-bb45-e539a64cdbfc",
"created_at":"2020-11-05T10:17:40.000000",
"updated_at":"2020-11-05T10:17:40.000000",
"finished_at":"2020-11-05T10:27:20.000000",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"status":"available",
"restore_type":"restore",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"snapshot_details":{
"created_at":"2020-11-04T13:58:37.000000",
"updated_at":"2020-11-05T10:27:22.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"snapshot_type":"full",
"display_name":"API taken 2",
"display_description":"API taken description 2",
"size":44171264,
"restore_size":2147483648,
"uploaded_size":44171264,
"progress_percent":100,
"progress_msg":"Creating Instance: cirros-2",
"warning_msg":null,
"error_msg":null,
"host":"TVM1",
"finished_at":"2020-11-04T14:06:03.000000",
"data_deleted":false,
"pinned":false,
"time_taken":428,
"vault_storage_id":null,
"status":"available"
},
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"instances":[
{
"id":"1fb104bf-7e2b-4cb6-84f6-96aabc8f1dd2",
"name":"cirros-2",
"status":"available",
"metadata":{
"config_drive":"",
"instance_id":"67d6a100-fee6-4aa5-83a1-66b070d2eabe",
"production":"1"
}
},
{
"id":"b083bb70-e384-4107-b951-8e9e7bbac380",
"name":"cirros-1",
"status":"available",
"metadata":{
"config_drive":"",
"instance_id":"e33c1eea-c533-4945-864d-0da1fc002070",
"production":"1"
}
}
],
"networks":[
],
"subnets":[
],
"routers":[
],
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/c76b3355a164498aa95ddbc960adc238/restores/29fdc1f8-1d53-4a10-bb45-e539a64cdbfc"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/c76b3355a164498aa95ddbc960adc238/restores/29fdc1f8-1d53-4a10-bb45-e539a64cdbfc"
}
],
"name":"OneClick Restore",
"description":"-",
"host":"TVM2",
"size":2147483648,
"uploaded_size":2147483648,
"progress_percent":100,
"progress_msg":"Restore from snapshot is complete",
"warning_msg":null,
"error_msg":null,
"time_taken":580,
"restore_options":{
"name":"OneClick Restore",
"oneclickrestore":true,
"restore_type":"oneclick",
"openstack":{
"instances":[
{
"name":"cirros-2",
"id":"67d6a100-fee6-4aa5-83a1-66b070d2eabe",
"availability_zone":"nova"
},
{
"name":"cirros-1",
"id":"e33c1eea-c533-4945-864d-0da1fc002070",
"availability_zone":"nova"
}
]
},
"type":"openstack",
"description":"-"
},
"metadata":[
]
}
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 05 Nov 2020 14:21:07 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-0e155b21-8931-480a-a749-6d8764666e4d
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 05 Nov 2020 15:13:30 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-98d4853c-314c-4f27-bd3f-f81bda1a2840
HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Thu, 05 Nov 2020 14:30:56 GMT
Content-Type: application/json
Content-Length: 992
Connection: keep-alive
X-Compute-Request-Id: req-7e18c309-19e5-49cb-a07e-90dd368fddae
{
"restore":{
"id":"3df1d432-2f76-4ebd-8f89-1275428842ff",
"created_at":"2020-11-05T14:30:56.048656",
"updated_at":"2020-11-05T14:30:56.048656",
"finished_at":null,
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"status":"restoring",
"restore_type":"restore",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/c76b3355a164498aa95ddbc960adc238/restores/3df1d432-2f76-4ebd-8f89-1275428842ff"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/c76b3355a164498aa95ddbc960adc238/restores/3df1d432-2f76-4ebd-8f89-1275428842ff"
}
],
"name":"One Click Restore",
"description":"One Click Restore",
"host":"",
"size":0,
"uploaded_size":0,
"progress_percent":0,
"progress_msg":null,
"warning_msg":null,
"error_msg":null,
"time_taken":0,
"restore_options":{
"openstack":{
},
"type":"openstack",
"oneclickrestore":true,
"vmware":{
},
"restore_type":"oneclick"
},
"metadata":[
]
}
}
HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Mon, 09 Nov 2020 09:53:31 GMT
Content-Type: application/json
Content-Length: 1713
Connection: keep-alive
X-Compute-Request-Id: req-84f00d6f-1b12-47ec-b556-7b3ed4c2f1d7
{
"restore":{
"id":"778baae0-6c64-4eb1-8fa3-29324215c43c",
"created_at":"2020-11-09T09:53:31.037588",
"updated_at":"2020-11-09T09:53:31.037588",
"finished_at":null,
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"status":"restoring",
"restore_type":"restore",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/c76b3355a164498aa95ddbc960adc238/restores/778baae0-6c64-4eb1-8fa3-29324215c43c"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/c76b3355a164498aa95ddbc960adc238/restores/778baae0-6c64-4eb1-8fa3-29324215c43c"
}
],
"name":"API",
"description":"API Created",
"host":"",
"size":0,
"uploaded_size":0,
"progress_percent":0,
"progress_msg":null,
"warning_msg":null,
"error_msg":null,
"time_taken":0,
"restore_options":{
"openstack":{
"instances":[
{
"vdisks":[
{
"new_volume_type":"iscsi",
"id":"365ad75b-ca76-46cb-8eea-435535fd2e22",
"availability_zone":"nova"
}
],
"name":"cirros-1-selective",
"availability_zone":"nova",
"nics":[
],
"flavor":{
"vcpus":1,
"disk":1,
"swap":"",
"ram":512,
"ephemeral":0,
"id":"1"
},
"include":true,
"id":"e33c1eea-c533-4945-864d-0da1fc002070"
},
{
"include":false,
"id":"67d6a100-fee6-4aa5-83a1-66b070d2eabe"
}
],
"restore_topology":false,
"networks_mapping":{
"networks":[
{
"snapshot_network":{
"subnet":{
"id":"b7b54304-aa82-4d50-91e6-66445ab56db4"
},
"id":"5fb7027d-a2ac-4a21-9ee1-438c281d2b26"
},
"target_network":{
"subnet":{
"id":"b7b54304-aa82-4d50-91e6-66445ab56db4"
},
"id":"5fb7027d-a2ac-4a21-9ee1-438c281d2b26",
"name":"internal"
}
}
]
}
},
"restore_type":"selective",
"type":"openstack",
"oneclickrestore":false
},
"metadata":[
]
}
}
HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Mon, 09 Nov 2020 12:53:03 GMT
Content-Type: application/json
Content-Length: 1341
Connection: keep-alive
X-Compute-Request-Id: req-311fa97e-0fd7-41ed-873b-482c149ee743
{
"restore":{
"id":"0bf96f46-b27b-425c-a10f-a861cc18b82a",
"created_at":"2020-11-09T12:53:02.726757",
"updated_at":"2020-11-09T12:53:02.726757",
"finished_at":null,
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"status":"restoring",
"restore_type":"restore",
"snapshot_id":"ed4f29e8-7544-4e1c-af8a-a76031211926",
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/c76b3355a164498aa95ddbc960adc238/restores/0bf96f46-b27b-425c-a10f-a861cc18b82a"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/c76b3355a164498aa95ddbc960adc238/restores/0bf96f46-b27b-425c-a10f-a861cc18b82a"
}
],
"name":"API",
"description":"API description",
"host":"",
"size":0,
"uploaded_size":0,
"progress_percent":0,
"progress_msg":null,
"warning_msg":null,
"error_msg":null,
"time_taken":0,
"restore_options":{
"restore_type":"inplace",
"type":"openstack",
"oneclickrestore":false,
"openstack":{
"instances":[
{
"restore_boot_disk":true,
"include":true,
"id":"7c1bb5d2-aa5a-44f7-abcd-2d76b819b4c8",
"vdisks":[
{
"restore_cinder_volume":true,
"id":"f6b3fef6-4b0e-487e-84b5-47a14da716ca"
}
]
},
{
"restore_boot_disk":true,
"include":true,
"id":"08dab61c-6efd-44d3-a9ed-8e789d338c1b",
"vdisks":[
{
"restore_cinder_volume":true,
"id":"53204f34-019d-4ba8-ada1-e6ab7b8e5b43"
}
]
}
]
}
},
"metadata":[
]
}
}
{
"restore":{
"options":{
"openstack":{
},
"type":"openstack",
"oneclickrestore":true,
"vmware":{},
"restore_type":"oneclick"
},
"name":"One Click Restore",
"description":"One Click Restore"
}
}
{
"restore":{
"name":"<restore name>",
"description":"<restore description>",
"options":{
"openstack":{
"instances":[
{
"name":"<new name of instance>",
"include":<true/false>,
"id":"<original id of instance to be restored>"
"availability_zone":"<availability zone>",
"vdisks":[
{
"id":"<original ID of Volume>",
"new_volume_type":"<new volume type>",
"availability_zone":"<Volume availability zone>"
}
],
"nics":[
{
'mac_address':'<mac address of the pre-created port>',
'ip_address':'<IP of the pre-created port>',
'id':'<ID of the pre-created port>',
'network':{
'subnet':{
'id':'<ID of the subnet of the pre-created port>'
},
'id':'<ID of the network of the pre-created port>'
}
],
"flavor":{
"vcpus":<Integer>,
"disk":<Integer>,
"swap":<Integer>,
"ram":<Integer>,
"ephemeral":<Integer>,
"id":<Integer>
}
}
],
"restore_topology":<true/false>,
"networks_mapping":{
"networks":[
{
"snapshot_network":{
"subnet":{
"id":"<ID of the original Subnet ID>"
},
"id":"<ID of the original Network ID>"
},
"target_network":{
"subnet":{
"id":"<ID of the target Subnet ID>"
},
"id":"<ID of the target Network ID>",
"name":"<name of the target network>"
}
}
]
}
},
"restore_type":"selective",
"type":"openstack",
"oneclickrestore":false
}
}
}
{
"restore":{
"name":"<restore-name>",
"description":"<restore-description>",
"options":{
"restore_type":"inplace",
"type":"openstack",
"oneclickrestore":false,
"openstack":{
"instances":[
{
"restore_boot_disk":<Boolean>,
"include":<Boolean>,
"id":"<ID of the instance the volumes are attached to>",
"vdisks":[
{
"restore_cinder_volume":<boolean>,
"id":"<ID of the Volume to restore>"
}
]
}
]
}
}
}
}