header:
header_color: blue
body_text_color: "#DC143C"
body_text:
header_font_size: 25px
body_text_font_size: 22px
[root@TVM1 ~]# source /home/stack/myansible/bin/activate
(myansible) [root@TVM1 ~]# cd /home/stack/myansible/lib/python3.6/site-packages/tvault_configurator
(myansible) [root@TVM1 tvault_configurator]# python recreate_conf.py
(myansible) [root@TVM1 tvault_configurator]# systemctl restart tvault-config
Username: admin
Password: password
source /home/stack/myansible/bin/activate
[DEFAULT]
vault_storage_type = nfs
vault_storage_nfs_export = 192.168.1.34:/mnt/tvault/tvm5
vault_storage_nfs_options = nolock,soft,timeo=180,intr,lookupcache=none
vault_data_directory_old = /var/triliovault
vault_data_directory = /var/trilio/triliovault-mounts
log_file = /var/log/kolla/triliovault-datamover/tvault-contego.log
debug = False
verbose = True
max_uploads_pending = 3
max_commit_pending = 3
dmapi_transport_url = rabbit://openstack:[email protected]:5672,openstack:[email protected]:5672,openstack:[email protected]:5672//
[dmapi_database]
connection = mysql+pymysql://dmapi:x5nvYXnAn4rXmCHfWTK8h3wwShA4vxMq3gE2jH57@kolla-victoriaR-internal.triliodata.demo:3306/dmapi
[libvirt]
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = volumes
[ceph]
keyring_ext = .keyring
ceph_dir = /etc/ceph/
[contego_sys_admin]
helper_command = sudo /usr/bin/privsep-helper
[conductor]
use_local = True
[oslo_messaging_rabbit]
ssl = false
[cinder]
http_retries = 10
workloadmgr license-create <license_file>
[DEFAULT]
vault_storage_type = nfs
vault_storage_nfs_export = 192.168.1.34:/mnt/tvault/tvm5
vault_storage_nfs_options = nolock,soft,timeo=180,intr,lookupcache=none
vault_data_directory_old = /var/triliovault
vault_data_directory = /var/trilio/triliovault-mounts
log_file = /var/log/kolla/triliovault-datamover/tvault-contego.log
debug = False
verbose = True
max_uploads_pending = 3
max_commit_pending = 3
dmapi_transport_url = rabbit://openstack:[email protected]:5672,openstack:[email protected]:5672,openstack:[email protected]:5672//
[dmapi_database]
connection = mysql+pymysql://dmapi:x5nvYXnAn4rXmCHfWTK8h3wwShA4vxMq3gE2jH57@kolla-victoriaR-internal.triliodata.demo:3306/dmapi
[libvirt]
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = volumes
[ceph]
keyring_ext = .volumes.keyring
ceph_dir = /etc/ceph/directory1/,/etc/ceph/directory2/
[contego_sys_admin]
helper_command = sudo /usr/bin/privsep-helper
[conductor]
use_local = True
[oslo_messaging_rabbit]
ssl = false
[cinder]
http_retries = 10
workloadmgr trust-list
workloadmgr trust-show <trust_id>
workloadmgr trust-create [--is_cloud_trust {True,False}] <role_name>
workloadmgr trust-delete <trust_id>
qemu-img info 85b645c5-c1ea-4628-b5d8-1faea0e9d549
image: 85b645c5-c1ea-4628-b5d8-1faea0e9d549
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 21M
cluster_size: 65536
backing file: /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW0=/workload_3c2fbee5-ad90-4448-b009-5047bcffc2ea/snapshot_f4874ed7-fe85-4d7d-b22b-082a2e068010/vm_id_9894f013-77dd-4514-8e65-818f4ae91d1f/vm_res_id_9ae3a6e7-dffe-4424-badc-bc4de1a18b40_vda/a6289269-3e72-4085-adca-e228ba656984
Format specific information:
compat: 1.1
lazy refcounts: false
refcount bits: 16
corrupt: false
# echo -n 10.10.2.20:/upstream | base64
MTAuMTAuMi4yMDovdXBzdHJlYW0=
# mount --bind <mount-path1> <mount-path2>
# vi /etc/fstab
<mount-path1> <mount-path2> none bind 0 0
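The mount-point directory name is simply the Base64 encoding of the NFS export string, so the mapping can be checked in both directions; a minimal sketch using the example export from above:
# Encode an NFS export to its mount directory name
echo -n 10.10.2.20:/upstream | base64
# MTAuMTAuMi4yMDovdXBzdHJlYW0=
# Decode an existing mount directory name back to its export
echo -n MTAuMTAuMi4yMDovdXBzdHJlYW0= | base64 -d
# 10.10.2.20:/upstream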
Please ask your Trilio Customer Success Manager or Engineer.
This page will be updated once the script is publicly available.
./backing_file_update.sh /var/triliovault-mounts/<base64>/workload_<workload_id>
/tmp/backing_file_update.log


/etc/kolla/passwords.yml
➡️ Trilio entries have been appended at the end of the file
docker stop triliovault_datamover_api
docker rm triliovault_datamover_api
rm -rf /etc/kolla/triliovault-datamover-api
rm -rf /var/log/kolla/triliovault-datamover-api/
docker stop triliovault_datamover
docker rm triliovault_datamover
rm -rf /etc/kolla/triliovault-datamover
rm -rf /var/log/kolla/triliovault-datamover/
rm /etc/kolla/haproxy/services.d/triliovault-datamover-api.cfg
docker restart haproxy
kolla-ansible -i multinode deploy
openstack service delete dmapi
openstack user delete dmapi
mysql -u root -p
DROP DATABASE dmapi;
DROP USER dmapi;
virsh list
virsh destroy <Trilio VM Name or ID>
virsh undefine <Trilio VM name>
# For RHEL and CentOS
yum install genisoimage
#For Ubuntu
apt-get install genisoimage
[root@kvm]# cat meta-data
instance-id: triliovault
network-interfaces: |
  auto eth0
  iface eth0 inet static
  address 158.69.170.20
  netmask 255.255.255.0
  gateway 158.69.170.30
  dns-nameservers 11.11.0.51
local-hostname: localhost
[root@kvm]# cat user-data
#cloud-config
chpasswd:
  list: |
    root:password1
    stack:password2
  expire: False
genisoimage -output tvault-firstboot-config.iso -volid cidata -joliet -rock user-data meta-data
tar Jxvf TrilioVault_file.tar.xz
virt-install -n triliovault-vm --memory 24576 --vcpus 8 \
--os-type linux \
--disk tvault-appliance-os-3.0.154.qcow2,device=disk,bus=virtio,size=40 \
--network bridge=virbr0,model=virtio \
--network bridge=virbr1,model=virtio \
--graphics none \
--import \
--disk path=tvault-firstboot-config.iso,device=cdrom
sudo yum remove cloud-init
server {
listen <dashboard_ip>:8000 ssl ;
ssl_certificate "/opt/stack/data/cert/workloadmgr.cert";
ssl_certificate_key "/opt/stack/data/cert/workloadmgr.key";
keepalive_timeout 65;
proxy_read_timeout 1800;
access_log on;
location / {
proxy_set_header Host $host:$server_port;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass https://<virtual_ip>:443;
}
}
server {
listen <dashboard_ip>:3001 ssl ;
ssl_certificate "/opt/stack/data/cert/workloadmgr.cert";
ssl_certificate_key "/opt/stack/data/cert/workloadmgr.key";
keepalive_timeout 65;
proxy_read_timeout 1800;
access_log on;
location / {
proxy_set_header Host $host:$server_port;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass https://<virtual_ip>:3001;
}
}
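After editing, the configuration can be syntax-checked before restarting the load balancer through pacemaker; nginx -t is a standard nginx check, and the restart command is the same one used elsewhere in this guide:
nginx -t
pcs resource restart lb_nginx-clone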
pcs resource create dashboard_ip ocf:heartbeat:IPaddr2 ip=<new_vip> cidr_netmask=<netmask> nic=<new_nw_interface> op monitor interval=30s
pcs constraint colocation add dashboard_ip virtual_ip
iptables -A INPUT -p tcp -s tvm1,tvm2,tvm3 --dport 80 -j ACCEPT
iptables -A INPUT -p tcp -s tvm1,tvm2,tvm3 --dport 443 -j ACCEPT
iptables -A INPUT -p tcp --dport 80 -j DROP
iptables -A INPUT -p tcp --dport 443 -j DROP
https://<dashboard_ip>:8000
[DEFAULT]
max_wait_for_upload = 48
[global_job_scheduler]
misfire_grace_time = 600
[DEFAULT]
dmapi_workers = 16
retries 5
timeout http-request 10m
timeout queue 10m
timeout connect 10m
timeout client 10m
timeout server 10m
timeout check 10m
balance roundrobin
maxconn 50000
[cinder]
http_retries = 10
header:
header_color: blue
body_text_color: "#DC143C"
body_text:
header_font_size: 25px
body_text_font_size: 22px
[filter:authtoken]
interface = internal
[root@TVM1 ssl]# cd /etc/tvault/ssl/
[root@TVM1 ssl]# ls -lisa server*
577678 0 lrwxrwxrwx 1 root root 8 Jan 21 14:36 server.crt -> TVM1.crt
577672 0 lrwxrwxrwx 1 root root 8 Jan 21 14:36 server.key -> TVM1.key
1178820 0 lrwxrwxrwx 1 root root 8 Jan 21 14:36 server.pem -> TVM1.pem
def main():
    # configure the networking
    #create_ssl_certificates()
    http_thread = Thread(target=main_http)
    http_thread.daemon = True  # thread dies with the program
    http_thread.start()
    bottle.debug(True)
    srv = SSLWSGIRefServer(host='::', port=443)
    bottle.run(server=srv, app=app, quiet=False, reloader=False)
[root@TVM1 ~]# systemctl restart tvault-config
[root@TVM1 ~]# pcs resource restart lb_nginx-clone
lb_nginx-clone successfully restarted
[root@TVM1 ~]# cd /opt/stack/data/cert/
[root@TVM1 cert]# ls -lisa workloadmgr*
577678 0 lrwxrwxrwx 1 root root 8 Jan 21 14:36 workloadmgr.crt
577672 0 lrwxrwxrwx 1 root root 8 Jan 21 14:36 workloadmgr.key
[root@TVM1 ~]# pcs resource restart lb_nginx-clone
lb_nginx-clone successfully restarted
workloadmgr disable-scheduler --workloadids <workloadid>
workloadmgr enable-scheduler --workloadids <workloadid>
workloadmgr scheduler-trust-validate <workload_id>
#check current mount point
[root@compute ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 7.8G 0 7.8G 0% /dev
tmpfs 7.8G 0 7.8G 0% /dev/shm
tmpfs 7.8G 26M 7.8G 1% /run
tmpfs 7.8G 0 7.8G 0% /sys/fs/cgroup
/dev/mapper/cl-root 280G 12G 269G 5% /
/dev/sda1 976M 197M 713M 22% /boot
192.168.1.34:/mnt/tvault/42436 2.5T 1005G 1.5T 41% /var/trilio/triliovault-mounts/MTkyLjE2OC4xLjM0Oi9tbnQvdHZhdWx0LzQyNDM2
#Stop triliovault_datamover
[root@compute ~]# docker stop triliovault_datamover
triliovault_datamover
[root@compute ~]#
#Delete triliovault_datamover
[root@compute ~]# docker rm triliovault_datamover
triliovault_datamover
[root@compute ~]#
#unmount mount point
[root@compute ~]# umount /var/trilio/triliovault-mounts/MTkyLjE2OC4xLjM0Oi9tbnQvdHZhdWx0LzQyNDM2
#check mount point is unmounted successfully
[root@compute ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 7.8G 0 7.8G 0% /dev
tmpfs 7.8G 0 7.8G 0% /dev/shm
tmpfs 7.8G 26M 7.8G 1% /run
tmpfs 7.8G 0 7.8G 0% /sys/fs/cgroup
/dev/mapper/cl-root 280G 12G 269G 5% /
/dev/sda1 976M 197M 713M 22% /boot
[root@compute ~]#
#Delete mounted dir from compute node
[root@compute trilio]# rm -rf /var/trilio/triliovault-mounts/MTkyLjE2OC4xLjM0Oi9tbnQvdHZhdWx0LzQyNDM2


--snapshotids <snapshotid> ➡️ Search only in the specified snapshot ids; snapshot-id: include the instance with this UUID
--workload_ids <workload_id> ➡️ Specify the workload_ids that need to be reassigned to the new tenant. If not provided, all workloads from the old tenant will get reassigned to the new tenant. Specify multiple times for multiple workloads.
workloadmgr filepath-search [--snapshotids <snapshotid>]
[--end_filter <end_filter>]
[--start_filter <start_filter>]
[--date_from <date_from>]
[--date_to <date_to>]
<vm_id> <file_path>
workloadmgr workload-get-importworkloads-list [--project_id <project_id>]
workloadmgr workload-importworkloads [--workloadids <workloadid>]
workloadmgr workload-get-orphaned-workloads-list [--migrate_cloud {True,False}]
[--generate_yaml {True,False}]
workloadmgr workload-reassign-workloads
[--old_tenant_ids <old_tenant_id>]
[--new_tenant_id <new_tenant_id>]
[--workload_ids <workload_id>]
[--user_id <user_id>]
[--migrate_cloud {True,False}]
[--map_file <map_file>]
reassign_mappings:
  - old_tenant_ids: []  # user can provide a list of old_tenant_ids or workload_ids
    new_tenant_id: new_tenant_id
    user_id: user_id
    workload_ids: []  # user can provide a list of old_tenant_ids or workload_ids
    migrate_cloud: True/False  # Set to True to reassign workloads from
                               # other clouds as well. Default is False
  - old_tenant_ids: []  # user can provide a list of old_tenant_ids or workload_ids
    new_tenant_id: new_tenant_id
    user_id: user_id
    workload_ids: []  # user can provide a list of old_tenant_ids or workload_ids
    migrate_cloud: True/False  # Set to True to reassign workloads from
                               # other clouds as well. Default is False
root@controller:~# kolla-ansible -i multinode deploy
##Check that all Containers are up and running
#Controller node
root@controller:~# docker ps -a | grep trilio
583b8d42ab42 trilio/ubuntu-binary-trilio-datamover-api:4.1.36-ussuri "dumb-init --single-…" 3 days ago Up 3 days openstack-nova-api-triliodata-plugin
3be25d3819ac trilio/ubuntu-binary-trilio-horizon-plugin:4.1.36-ussuri "dumb-init --single-…" 4 days ago Up 4 days horizon
#Compute node
root@compute:~# docker ps -a | grep trilio
bf52face23fb trilio/ubuntu-binary-trilio-datamover:4.1.36-ussuri "dumb-init --single-…" 3 days ago Up 3 days trilio-datamover
## Verify the backup target has been changed successfully
# In case of switch to NFS
[root@compute ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 7.8G 0 7.8G 0% /dev
tmpfs 7.8G 0 7.8G 0% /dev/shm
tmpfs 7.8G 26M 7.8G 1% /run
tmpfs 7.8G 0 7.8G 0% /sys/fs/cgroup
/dev/mapper/cl-root 280G 12G 269G 5% /
/dev/sda1 976M 197M 713M 22% /boot
192.168.1.34:/mnt/tvault/42436 2.5T 1005G 1.5T 41% /var/trilio/triliovault-mounts/MTkyLjE2OC4xLjM0Oi9tbnQvdHZhdWx0LzQyNDM2
#In case of switch to S3
[root@compute ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 7.8G 0 7.8G 0% /dev
tmpfs 7.8G 0 7.8G 0% /dev/shm
tmpfs 7.8G 34M 7.8G 1% /run
tmpfs 7.8G 0 7.8G 0% /sys/fs/cgroup
/dev/mapper/cl-root 280G 12G 269G 5% /
/dev/sda1 976M 197M 713M 22% /boot
Trilio - - 0.0K - /var/trilio/triliovault-mounts
##Reverify in the triliovault_datamover containers
[root@compute ~]# docker exec -it triliovault_datamover bash
(triliovault-datamover)[nova@compute /]$ df -h
Filesystem Size Used Avail Use% Mounted on
overlay 280G 12G 269G 5% /
tmpfs 7.8G 0 7.8G 0% /sys/fs/cgroup
devtmpfs 7.8G 0 7.8G 0% /dev
tmpfs 7.8G 0 7.8G 0% /dev/shm
/dev/mapper/cl-root 280G 12G 269G 5% /etc/iscsi
tmpfs 6.3G 0 6.3G 0% /var/triliovault/tmpfs
Trilio - - 0.0K - /var/trilio/triliovault-mounts
workloadmgr snapshot-list --all=True
workloadmgr restore-list
pcs status
pcs status
Cluster name: triliovault
WARNINGS:
Corosync and pacemaker node names do not match (IPs used in setup?)
Stack: corosync
Current DC: tvm3 (version 1.1.23-1.el7_9.1-9acf116022) - partition with quorum
Last updated: Thu Aug 26 12:10:32 2021
Last change: Thu Aug 26 08:02:51 2021 by root via crm_resource on tvm1
3 nodes configured
8 resource instances configured
Online: [ tvm1 tvm2 tvm3 ]
Full list of resources:
virtual_ip (ocf::heartbeat:IPaddr2): Started tvm1
virtual_ip_public (ocf::heartbeat:IPaddr2): Started tvm1
virtual_ip_admin (ocf::heartbeat:IPaddr2): Started tvm1
virtual_ip_internal (ocf::heartbeat:IPaddr2): Started tvm1
wlm-cron (systemd:wlm-cron): Started tvm1
Clone Set: lb_nginx-clone [lb_nginx]
Started: [ tvm1 ]
Stopped: [ tvm2 tvm3 ]
Daemon Status:
corosync: active/enabled
pacemaker: active/enabled
pcsd: active/enabled
systemctl stop wlm-api
systemctl stop wlm-scheduler
systemctl stop wlm-workloads
systemctl stop mysqld
rabbitmqctl stop
reboot
shutdown
systemctl stop wlm-api
systemctl stop wlm-scheduler
systemctl stop wlm-workloads
systemctl stop mysqld
rabbitmqctl stop
shutdown
systemctl stop wlm-api
systemctl stop wlm-scheduler
systemctl stop wlm-workloads
systemctl stop mysqld
rabbitmqctl stop
pcs resource disable wlm-cron
pcs resource disable lb_nginx-clone
shutdown
galera_new_cluster
cd /opt/openstack-ansible/playbooks
openstack-ansible os-tvault-install.yml --tags "tvault-all-uninstall"
cd /opt/openstack-ansible/playbooks
openstack-ansible lxc-containers-destroy.yml --limit "DMAPI CONTAINER_NAME"
#tvault-dmapi
tvault-dmapi_hosts:
  infra-1:
    ip: 172.26.0.3
  infra-2:
    ip: 172.26.0.4
#tvault-datamover
tvault_compute_hosts:
  infra-1:
    ip: 172.26.0.7
  infra-2:
    ip: 172.26.0.8
# Datamover haproxy setting
haproxy_extra_services:
  - service:
      haproxy_service_name: datamover_service
      haproxy_backend_nodes: "{{ groups['dmapi_all'] | default([]) }}"
      haproxy_ssl: "{{ haproxy_ssl }}"
      haproxy_port: 8784
      haproxy_balance_type: http
      haproxy_backend_options:
        - "httpchk GET / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
rm /opt/openstack-ansible/inventory/env.d/tvault-dmapi.yml
source cloudadmin.rc
openstack endpoint delete "internal datamover service endpoint_id"
openstack endpoint delete "public datamover service endpoint_id"
openstack endpoint delete "admin datamover service endpoint_id"lxc-attach -n "GALERA CONTAINER NAME"
mysql -u root -p "root password"
DROP DATABASE dmapi;
DROP USER dmapi;
lxc-attach -n "RABBITMQ CONTAINER NAME"
rabbitmqctl delete_user dmapi
rabbitmqctl delete_vhost /dmapi
rm /etc/haproxy/conf.d/datamover_service
frontend datamover_service-front-1
bind ussuriubuntu.triliodata.demo:8784 ssl crt /etc/ssl/private/haproxy.pem ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
option httplog
option forwardfor except 127.0.0.0/8
reqadd X-Forwarded-Proto:\ https
mode http
default_backend datamover_service-back
frontend datamover_service-front-2
bind 172.26.1.2:8784
option httplog
option forwardfor except 127.0.0.0/8
mode http
default_backend datamover_service-back
backend datamover_service-back
mode http
balance leastconn
stick store-request src
stick-table type ip size 256k expire 30m
option forwardfor
option httplog
option httpchk GET / HTTP/1.0\r\nUser-agent:\ osa-haproxy-healthcheck
server controller_dmapi_container-bf17d5b3 172.26.1.75:8784 check port 8784 inter 12000 rise 1 fall 1
systemctl restart haproxy
rm -rf /opt/config-certs/rabbitmq
rm -rf /opt/config-certs/s3
virsh list
virsh destroy <Trilio VM Name or ID>
virsh undefine <Trilio VM name>
juju exec [-m <model>] --unit trilio-wlm/leader "sudo crm configure property maintenance-mode=false"
<high_watermark> ➡️ Value to set for High Watermark warnings
<project_id> ➡️ Project to assign the quota to
deb [trusted=yes] https://apt.fury.io/triliodata-4-1/ /
juju exec [-m <model>] --application trilio-wlm 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" workloadmgr python3-workloadmgrclient python3-contegoclient s3-fuse-plugin'
juju exec [-m <model>] --application trilio-horizon-plugin 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" tvault-horizon-plugin python-workloadmgrclient'
juju exec [-m <model>] --application trilio-dm-api 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" python3-dmapi'
juju exec [-m <model>] --application trilio-data-mover 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" tvault-contego s3-fuse-plugin'
juju exec [-m <model>] --application trilio-wlm 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" workloadmgr python3-workloadmgrclient python3-contegoclient python3-s3-fuse-plugin'
juju exec [-m <model>] --application trilio-horizon-plugin 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" python3-tvault-horizon-plugin python3-workloadmgrclient'
juju exec [-m <model>] --application trilio-dm-api 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" python3-dmapi'
juju exec [-m <model>] --application trilio-data-mover 'sudo apt-get update && sudo apt-get install -y --only-upgrade -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" python3-tvault-contego python3-s3-fuse-plugin'
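Once the packages are upgraded, the installed charm and package versions can be confirmed from the model status; a standard Juju check, whose matching application lines are shown below:
juju status | grep trilio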
trilio-data-mover <package version> active 3 trilio-data-mover jujucharms 8 ubuntu
trilio-dm-api <package version> active 1 trilio-dm-api jujucharms 5 ubuntu
trilio-horizon-plugin <package version> active 1 trilio-horizon-plugin jujucharms 4 ubuntu
trilio-wlm <package version> active 3 trilio-wlm jujucharms 7 ubuntu
juju exec [-m <model>] --unit trilio-wlm/leader "alembic -c /etc/workloadmgr/alembic.ini upgrade heads"
juju exec [-m <model>] --unit trilio-wlm/leader "alembic -c /etc/workloadmgr/alembic.ini current"
juju exec [-m <model>] --application trilio-horizon-plugin "systemctl restart apache2"
Log in to the Trilio unit and run "sudo dpkg --configure -a".
It will ask for user input; hit enter and log out from the unit.
From the MAAS node, run the command "juju resolve <trilio unit name>".
workloadmgr project-quota-type-list
workloadmgr project-quota-type-show <quota_type_id>
workloadmgr project-allowed-quota-create --quota-type-id quota_type_id
--allowed-value allowed_value
--high-watermark high_watermark
--project-id project_id
workloadmgr project-allowed-quota-list <project_id>
workloadmgr project-allowed-quota-show <allowed_quota_id>
workloadmgr project-allowed-quota-update [--allowed-value <allowed_value>]
[--high-watermark <high_watermark>]
[--project-id <project_id>]
<allowed_quota_id>
workloadmgr project-allowed-quota-delete <allowed_quota_id>
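For illustration, a filled-in create call; the numeric values are placeholders invented for this example (an allowed value of 10 with watermark warnings from 8 onward), not defaults:
workloadmgr project-allowed-quota-create --quota-type-id <quota_type_id> --allowed-value 10 --high-watermark 8 --project-id <project_id>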






# For RHOSP13
systemctl disable tripleo_trilio_dmapi.service
systemctl stop tripleo_trilio_dmapi.service
docker stop trilio_dmapi
# For RHOSP16 onwards
systemctl disable tripleo_trilio_dmapi.service
systemctl stop tripleo_trilio_dmapi.service
podman stop trilio_dmapi
# For RHOSP13
docker rm trilio_dmapi
docker rm trilio_datamover_api_init_log
docker rm trilio_datamover_api_db_sync
# For RHOSP16 onwards
podman rm trilio_dmapi
podman rm trilio_datamover_api_init_log
podman rm trilio_datamover_api_db_sync
rm -rf /var/lib/config-data/puppet-generated/triliodmapi
rm /var/lib/config-data/puppet-generated/triliodmapi.md5sum
rm -rf /var/log/containers/trilio-datamover-api/
# For RHOSP13
docker stop trilio_datamover
# For RHOSP16 onwards
systemctl disable tripleo_trilio_datamover.service
systemctl stop tripleo_trilio_datamover.service
podman stop trilio_datamover
# For RHOSP13
docker rm trilio_datamover
# For RHOSP16 onwards
podman rm trilio_datamover
## The following steps are applicable for all supported RHOSP releases.
# Check triliovault backup target mount point
mount | grep trilio
# Unmount it
-- If it's NFS (COPY UUID_DIR from your compute host using above command)
umount /var/lib/nova/triliovault-mounts/<UUID_DIR>
-- If it's S3
umount /var/lib/nova/triliovault-mounts
# Verify that it's unmounted
mount | grep trilio
df -h | grep trilio
# Remove mount point directory after verifying that backup target unmounted successfully.
# Otherwise actual data from backup target may get cleaned.
rm -rf /var/lib/nova/triliovault-mounts
rm -rf /var/lib/config-data/puppet-generated/triliodm/
rm /var/lib/config-data/puppet-generated/triliodm.md5sum
rm -rf /var/log/containers/trilio-datamover/
listen trilio_datamover_api
bind 172.25.3.60:13784 transparent ssl crt /etc/pki/tls/private/overcloud_endpoint.pem
bind 172.25.3.60:8784 transparent
http-request set-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
http-request set-header X-Forwarded-Port %[dst_port]
option httpchk
option httplog
server overcloud-controller-0.internalapi.localdomain 172.25.3.59:8784 check fall 5 inter 2000 rise 2
# For RHOSP13
docker restart haproxy-bundle-docker-0
# For RHOSP16 onwards
podman restart haproxy-bundle-podman-0
openstack service delete dmapi
openstack user delete dmapi
## On RHOSP13, run the following command on the node where the database service runs
docker exec -ti -u root galera-bundle-docker-0 mysql -u root
## On RHOSP16
podman exec -it galera-bundle-podman-0 mysql -u root
## Clean database
DROP DATABASE dmapi;
## Clean dmapi user
=> List 'dmapi' user accounts
MariaDB [mysql]> select user, host from mysql.user where user='dmapi';
+-------+-------------+
| user | host |
+-------+-------------+
| dmapi | 172.25.2.10 |
| dmapi | 172.25.2.8 |
+-------+-------------+
2 rows in set (0.00 sec)
=> Delete those user accounts
MariaDB [mysql]> DROP USER dmapi@172.25.2.10;
Query OK, 0 rows affected (0.82 sec)
MariaDB [mysql]> DROP USER dmapi@172.25.2.8;
Query OK, 0 rows affected (0.05 sec)
=> Verify that dmapi user got cleaned
MariaDB [mysql]> select user, host from mysql.user where user='dmapi';
Empty set (0.00 sec)
virsh list
virsh destroy <Trilio VM Name or ID>
virsh undefine <Trilio VM name>
workloadmgr policy-list
workloadmgr policy-show <policy_id>
workloadmgr policy-create --policy-fields <key=key-name>
[--display-description <display_description>]
[--metadata <key=key-name>]
<display_name>
workloadmgr policy-update [--display-name <display-name>]
[--display-description <display-description>]
[--policy-fields <key=key-name>]
[--metadata <key=key-name>]
<policy_id>
workloadmgr policy-assign [--add_project <project_id>]
[--remove_project <project_id>]
<policy_id>
workloadmgr policy-delete <policy_id>
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Mon, 09 Nov 2020 13:23:25 GMT
Content-Type: application/json
Content-Length: 244
Connection: keep-alive
X-Compute-Request-Id: req-bdfd3fb8-5cbf-4108-885f-63160426b2fa
{
"file_search":{
"created_at":"2020-11-09T13:23:25.698534",
"updated_at":null,
"id":14,
"deleted_at":null,
pip3 install --upgrade pip
pip3 download pip
export PIP_EXTRA_INDEX_URL=https://pypi.fury.io/triliodata-4-1/
pip3 download --extra-index-url $PIP_EXTRA_INDEX_URL s3fuse --no-cache-dir --no-deps
#First install dependent package configobj
pip3 install configobj
pip3 download --extra-index-url $PIP_EXTRA_INDEX_URL tvault-configurator --no-cache-dir --no-deps
#First install dependent package jinja2
pip3 install jinja2
pip3 download --extra-index-url $PIP_EXTRA_INDEX_URL workloadmgr --no-cache-dir --no-deps
pip3 download --extra-index-url $PIP_EXTRA_INDEX_URL workloadmgrclient --no-cache-dir --no-deps
pip3 download --extra-index-url $PIP_EXTRA_INDEX_URL contegoclient --no-cache-dir --no-deps
pip3 download oslo.messaging==12.1.6 --no-deps
tar -czvf tvault_backup.tar.gz /etc/tvault /etc/tvault-config /etc/workloadmgr /home/stack/myansible/lib/python3.6/site-packages/tvault_configurator/conf/users.json
cp tvault_backup.tar.gz /root/
source /home/stack/myansible/bin/activate
pip3 install --upgrade pip-<downloaded-version>-py3-none-any.whl --no-deps
systemctl stop tvault-object-store
pip3 install --upgrade s3fuse-<HF_VERSION>.tar.gz --no-deps
rm -rf /var/triliovault/*
pip3 install --upgrade tvault_configurator-<HF_VERSION>.tar.gz --no-deps
pip3 install --upgrade workloadmgr-<HF_VERSION>.tar.gz --no-deps
pip3 install --upgrade workloadmgrclient-<HF_VERSION>.tar.gz --no-deps
pip3 install --upgrade contegoclient-<HF_VERSION>.tar.gz --no-deps
pip3 install ./oslo.messaging-12.1.6-py3-none-any.whl --no-deps
source /home/stack/myansible/bin/activate
pip3 list | grep <package_name>
cd /root
tar -xzvf tvault_backup.tar.gz -C /
systemctl restart tvault-object-store
systemctl restart wlm-api
systemctl restart wlm-scheduler
systemctl restart wlm-workloads
systemctl restart tvault-config
pcs resource enable wlm-cron
## run below command to check status of wlm-cron
pcs status
## On All nodes
systemctl status wlm-api wlm-scheduler wlm-workloads tvault-config tvault-object-store | grep -E 'Active|loaded'
## On primary node
pcs status
systemctl status wlm-cron
ps -ef | grep workloadmgr-cron | grep -v grep
## Above command should show only 2 processes running; sample below
[root@tvm6 ~]# ps -ef | grep workloadmgr-cron | grep -v grep
nova 8841 1 2 Jul28 ? 00:40:44 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-cron --config-file=/etc/workloadmgr/workloadmgr.conf
nova 8898 8841 0 Jul28 ? 00:07:03 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-cron --config-file=/etc/workloadmgr/workloadmgr.conf
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Mon, 09 Nov 2020 13:24:28 GMT
Content-Type: application/json
Content-Length: 819
Connection: keep-alive
X-Compute-Request-Id: req-d57bea9a-9968-4357-8743-e0b906466063
{
"file_search":{
"created_at":"2020-11-09T13:23:25.000000",
"updated_at":"2020-11-09T13:23:48.000000",
"id":14,
"deleted_at":null,
"status":"completed",
"error_msg":null,
"filepath":"/etc/h*",
"json_resp":"[
{
"ed4f29e8-7544-4e1c-af8a-a76031211926":[
{
"/dev/vda1":[
"/etc/hostname",
"/etc/hosts"
],
"/etc/hostname":{
"dev":"2049",
"ino":"32",
"mode":"33204",
"nlink":"1",
"uid":"0",
"gid":"0",
"rdev":"0",
"size":"1",
"blksize":"1024",
"blocks":"2",
"atime":"1603455255",
"mtime":"1603455255",
"ctime":"1603455255"
},
"/etc/hosts":{
"dev":"2049",
"ino":"127",
"mode":"33204",
"nlink":"1",
"uid":"0",
"gid":"0",
"rdev":"0",
"size":"37",
"blksize":"1024",
"blocks":"2",
"atime":"1603455257",
"mtime":"1431011050",
"ctime":"1431017172"
}
}
]
}
]",
"vm_id":"08dab61c-6efd-44d3-a9ed-8e789d338c1b"
}
}
{
"file_search":{
"start":<Integer>,
"end":<Integer>,
"filepath":"<Reg-Ex String>",
"date_from":<Date Format: YYYY-MM-DDTHH:MM:SS>,
"date_to":<Date Format: YYYY-MM-DDTHH:MM:SS>,
"snapshot_ids":[
"<Snapshot-ID>"
],
"vm_id":"<VM-ID>"
}
}
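As a worked example of this schema, the following body searches the given VM's snapshots for files matching /etc/h*; the start/end window values are illustrative, and the vm_id is the one used in the responses above:
{
   "file_search":{
      "start":0,
      "end":500,
      "filepath":"/etc/h*",
      "vm_id":"08dab61c-6efd-44d3-a9ed-8e789d338c1b"
   }
}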
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 11 Nov 2020 15:29:03 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-9d779802-9c65-463a-973c-39cdffcba82e
[root@TVM2 ~]# pcs resource disable wlm-cron
[root@TVM2 ~]# systemctl status wlm-cron
● wlm-cron.service - workload's scheduler cron service
Loaded: loaded (/etc/systemd/system/wlm-cron.service; disabled; vendor preset: disabled)
Active: inactive (dead)
Jun 11 08:27:06 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:06 - INFO - 1...t
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 140686268624368 Child 11389 ki...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - 1...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Shutting down thread pool
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...l
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Stopping the threads
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...s
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: All threads are stopped succes...y
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - A...y
Jun 11 08:27:09 TVM2 systemd[1]: Stopped workload's scheduler cron service.
Hint: Some lines were ellipsized, use -l to show in full.
[root@TVM2 ~]# pcs resource show wlm-cron
Resource: wlm-cron (class=systemd type=wlm-cron)
Meta Attrs: target-role=Stopped
Operations: monitor interval=30s on-fail=restart timeout=300s (wlm-cron-monitor-interval-30s)
start interval=0s on-fail=restart timeout=300s (wlm-cron-start-interval-0s)
stop interval=0s timeout=300s (wlm-cron-stop-interval-0s)
[root@TVM2 ~]# ps -ef | grep -i workloadmgr-cron
root 15379 14383 0 08:27 pts/0 00:00:00 grep --color=auto -i workloadmgr-cron
deb [trusted=yes] https://apt.fury.io/triliodata-4-1/ /
apt-get update
apt list --upgradable[triliovault-4-1]
name=triliovault-4-1
baseurl=http://trilio:[email protected]:8283/triliodata-4-1/yum/
gpgcheck=0
enabled=1
yum repolist
yum check-update
lxc-attach -n controller_dmapi
tar -czvf dmapi_config.tar.gz /etc/dmapi
apt list --upgradable
apt install python3-dmapi --upgrade
tar -xzvf dmapi_config.tar.gz -C /
systemctl restart tvault-datamover-api
systemctl status tvault-datamover-api
lxc-attach -n controller_dmapi
tar -czvf dmapi_config.tar.gz /etc/dmapi
yum list installed | grep dmapi
yum check-update python3-dmapi
yum upgrade python3-dmapi
tar -xzvf dmapi_config.tar.gz -C /
systemctl restart tvault-datamover-api
systemctl status tvault-datamover-api
lxc-attach -n controller_horizon_container-ead7cc60
apt list --upgradable
apt install python3-tvault-horizon-plugin --upgrade
apt install python3-workloadmgrclient --upgrade
apt install python3-contegoclient --upgrade
systemctl restart apache2
workloadmgr --version
lxc-attach -n controller_horizon_container-ead7cc60
yum list installed | grep trilio
yum upgrade python3-contegoclient-el8 python3-tvault-horizon-plugin-el8 python3-workloadmgrclient-el8
systemctl restart httpd
workloadmgr --version
tar -czvf contego_config.tar.gz /etc/tvault-contego/
apt list --upgradable
(NFS only) umount /var/triliovault-mounts/<base64-hash>
(S3 only) umount /var/triliovault-mounts
apt install python3-tvault-contego --upgrade
apt install python3-s3-fuse-plugin --upgrade
tar -xzvf contego_config.tar.gz -C /
systemctl restart tvault-object-store
systemctl restart tvault-contego
systemctl status tvault-contego
df -h
tar -czvf contego_config.tar.gz /etc/tvault-contego/
yum list installed | grep tvault
yum upgrade python3-tvault-contego
yum upgrade python3-s3fuse-plugin
tar -xzvf contego_config.tar.gz -C /
systemctl restart tvault-object-store
systemctl restart tvault-contego
systemctl status tvault-contego
df -h
retries 5
timeout http-request 10m
timeout queue 10m
timeout connect 10m
timeout client 10m
timeout server 10m
timeout check 10m
balance roundrobin
maxconn 50000
haproxy_extra_services:
  - service:
      haproxy_service_name: datamover_service
      haproxy_backend_nodes: "{{ groups['dmapi_all'] | default([]) }}"
      haproxy_ssl: "{{ haproxy_ssl }}"
      haproxy_port: 8784
      haproxy_balance_type: http
      haproxy_backend_options:
        - "httpchk GET / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
# Datamover haproxy setting
haproxy_extra_services:
  - service:
      haproxy_service_name: datamover_service
      haproxy_backend_nodes: "{{ groups['dmapi_all'] | default([]) }}"
      haproxy_ssl: "{{ haproxy_ssl }}"
      haproxy_port: 8784
      haproxy_balance_type: http
      haproxy_balance_alg: roundrobin
      haproxy_timeout_client: 10m
      haproxy_timeout_server: 10m
      haproxy_backend_options:
        - "httpchk GET / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
cd /opt/openstack-ansible/playbooks
openstack-ansible haproxy-install.yml
curl -O https://raw.githubusercontent.com/trilioData/triliovault-cfg-scripts/master/common/nova_userid.sh
chmod +x nova_userid.sh
vi nova_userid.sh # change nova user_id and group_id to uid & gid present on compute nodes.
./nova_userid.sh
id nova
git clone -b <branch> https://github.com/trilioData/triliovault-cfg-scripts.git
cd triliovault-cfg-scripts/
cp -R ansible/roles/* /opt/openstack-ansible/playbooks/roles/
cp ansible/main-install.yml /opt/openstack-ansible/playbooks/os-tvault-install.yml
cp ansible/environments/group_vars/all/vars.yml /etc/openstack_deploy/user_tvault_vars.yml
- import_playbook: os-tvault-install.yml
# Datamover haproxy setting
haproxy_extra_services:
  - service:
      haproxy_service_name: datamover_service
      haproxy_backend_nodes: "{{ groups['dmapi_all'] | default([]) }}"
      haproxy_ssl: "{{ haproxy_ssl }}"
      haproxy_port: 8784
      haproxy_balance_type: http
      haproxy_balance_alg: roundrobin
      haproxy_timeout_client: 10m
      haproxy_timeout_server: 10m
      haproxy_backend_options:
        - "httpchk GET / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
cat > /opt/openstack-ansible/inventory/env.d/tvault-dmapi.yml
component_skel:
  dmapi_api:
    belongs_to:
      - dmapi_all
container_skel:
  dmapi_container:
    belongs_to:
      - tvault-dmapi_containers
    contains:
      - dmapi_api
physical_skel:
  tvault-dmapi_containers:
    belongs_to:
      - all_containers
  tvault-dmapi_hosts:
    belongs_to:
      - hosts
#tvault-dmapi
tvault-dmapi_hosts:      # Add controller details in this section, as the Trilio DMAPI resides on the controller nodes.
  infra-1:               # Controller host name.
    ip: 172.26.0.3       # IP address of the controller.
  infra-2:               # If there are multiple controllers, add their details in the same manner as infra-2.
    ip: 172.26.0.4
#tvault-datamover
tvault_compute_hosts:    # Add compute details in this section, as the Trilio Datamover resides on the compute nodes.
  infra-1:               # Compute host name.
    ip: 172.26.0.7       # IP address of the compute node.
  infra-2:               # If there are multiple compute nodes, add their details in the same manner as infra-2.
    ip: 172.26.0.8
##common editable parameters required for installing tvault-horizon-plugin, tvault-contego and tvault-datamover-api
#ip address of TVM
IP_ADDRESS: sample_tvault_ip_address
##Time Zone
TIME_ZONE: "Etc/UTC"
#Update the TVAULT package version here; plugins of the mentioned version will be installed. Example: TVAULT_PACKAGE_VERSION: 3.3.36
TVAULT_PACKAGE_VERSION: 4.1.94 #GA build version
# Update Openstack dist code name like ussuri etc.
OPENSTACK_DIST: ussuri
#Need to add the following statement in nova sudoers file
#nova ALL = (root) NOPASSWD: /home/tvault/.virtenv/bin/privsep-helper *
#These changes are required for the Datamover; otherwise the Datamover will not work
#Are you sure? Please set the variable to
# UPDATE_NOVA_SUDOERS_FILE: proceed
#otherwise the ansible tvault-contego installation will exit
UPDATE_NOVA_SUDOERS_FILE: proceed
##### Select snapshot storage type #####
#Details for NFS as snapshot storage; NFS_SHARES entries should begin with "-".
##True/False
NFS: False
NFS_SHARES:
- sample_nfs_server_ip1:sample_share_path
- sample_nfs_server_ip2:sample_share_path
#if NFS_OPTS is empty then default value will be "nolock,soft,timeo=180,intr,lookupcache=none"
NFS_OPTS: ""
#### Details for S3 as snapshot storage
##True/False
S3: False
VAULT_S3_ACCESS_KEY: sample_s3_access_key
VAULT_S3_SECRET_ACCESS_KEY: sample_s3_secret_access_key
VAULT_S3_REGION_NAME: sample_s3_region_name
VAULT_S3_BUCKET: sample_s3_bucket
VAULT_S3_SIGNATURE_VERSION: default
#### S3 Specific Backend Configurations
#### Provide one of the following two values in the s3_type variable; the string's case must match
#Amazon/Other_S3_Compatible
s3_type: sample_s3_type
#### Required field(s) for all S3 backends except Amazon
VAULT_S3_ENDPOINT_URL: ""
#True/False
VAULT_S3_SECURE: True
VAULT_S3_SSL_CERT: ""
###details of datamover API
##If SSL is enabled "DMAPI_ENABLED_SSL_APIS" value should be dmapi.
#DMAPI_ENABLED_SSL_APIS: dmapi
##If SSL is disabled "DMAPI_ENABLED_SSL_APIS" value should be empty.
DMAPI_ENABLED_SSL_APIS: ""
DMAPI_SSL_CERT: ""
DMAPI_SSL_KEY: ""
#### Any service is using Ceph Backend then set ceph_backend_enabled value to True
#True/False
ceph_backend_enabled: False
#Set verbosity level and run playbooks with -vvv option to display custom debug messages
verbosity_level: 3
cd /opt/openstack-ansible/playbooks
# To create Dmapi container
openstack-ansible lxc-containers-create.yml
#To Deploy Trilio Components
openstack-ansible os-tvault-install.yml
#To configure Haproxy for Dmapi
openstack-ansible haproxy-install.yml
openstack-ansible setup-infrastructure.yml --syntax-check
openstack-ansible setup-hosts.yml
openstack-ansible setup-infrastructure.yml
openstack-ansible setup-openstack.yml
lxc-ls # Check the dmapi container is present on the controller node.
lxc-info -s controller_dmapi_container-a11984bf # Confirm running status of the container
systemctl status tvault-contego.service
systemctl status tvault-object-store # If the storage backend is S3
df -h # Verify the mount point is mounted on compute node(s)
lxc-attach -n controller_horizon_container-1d9c055c # To log in on the horizon container
apt list | egrep 'tvault-horizon-plugin|workloadmgrclient|contegoclient' # For Ubuntu based container
yum list installed | egrep 'tvault-horizon-plugin|workloadmgrclient|contegoclient' # For CentOS based container
haproxy -c -V -f /etc/haproxy/haproxy.cfg # Verify the keyword datamover_service-back is present in the output.
openstack image create \
--file <File Manager Image Path> \
--container-format bare \
--disk-format qcow2 \
--public \
--property hw_qemu_guest_agent=yes \
--property tvault_recovery_manager=yes \
--property hw_disk_bus=virtio \
tvault-file-manager
guest-file-read
guest-file-write
guest-file-open
guest-file-close
SELINUX=disabled
yum install python3 lvm2
apt-get update
apt-get install qemu-guest-agent
systemctl enable qemu-guest-agent
Loaded: loaded (/etc/init.d/qemu-guest-agent; generated)
DAEMON_ARGS="-F/etc/qemu/fsfreeze-hook"
Loaded: loaded (/usr/lib/systemd/system/qemu-guest-agent.service; disabled; vendor preset: enabled)
systemctl edit qemu-guest-agent
[Service]
ExecStart=
ExecStart=/usr/sbin/qemu-ga -F/etc/qemu/fsfreeze-hook
systemctl restart qemu-guest-agent
apt-get install python3
workloadmgr snapshot-mount <snapshot_id> <mount_vm_id>
workloadmgr snapshot-mounted-list [--workloadid <workloadid>]
workloadmgr snapshot-dismount <snapshot_id>
systemctl restart wlm-api
systemctl restart wlm-scheduler
systemctl restart wlm-workloads
pcs resource restart wlm-cron
pcs resource restart lb_nginx-clone
[root@TVM1 ~]# rabbitmqctl stop
Stopping and halting node rabbit@TVM1 ...
[root@TVM1 ~]# rabbitmq-server -detached
Warning: PID file not written; -detached was passed.
[root@TVM1 ~]# rabbitmqctl cluster_status
Cluster status of node rabbit@TVM1 ...
[{nodes,[{disc,[rabbit@TVM1,rabbit@TVM2,rabbit@TVM3]}]},
{running_nodes,[rabbit@TVM2,rabbit@TVM3,rabbit@TVM1]},
{cluster_name,<<"rabbit@TVM1">>},
{partitions,[{rabbit@TVM2,[rabbit@TVM1,rabbit@TVM3]},
{rabbit@TVM3,[rabbit@TVM1,rabbit@TVM2]}]},
{alarms,[{rabbit@TVM2,[]},{rabbit@TVM3,[]},{rabbit@TVM1,[]}]}]
systemctl stop mysqld
systemctl start mysqld
systemctl stop mysqld
cat /var/lib/mysql/grastate.dat
# GALERA saved state
version: 2.1
uuid: 353e129f-11f2-11eb-b3f7-76f39b7b455d
seqno: 213576545367
safe_to_bootstrap: 1
galera_new_cluster
systemctl start mysqld
MariaDB [(none)]> show status like 'wsrep_incoming_addresses';
+--------------------------+-------------------------------------------------+
| Variable_name | Value |
+--------------------------+-------------------------------------------------+
| wsrep_incoming_addresses | 10.10.2.13:3306,10.10.2.14:3306,10.10.2.12:3306 |
+--------------------------+-------------------------------------------------+
1 row in set (0.01 sec)
MariaDB [(none)]> show status like 'wsrep_cluster_size';
+--------------------+-------+
| Variable_name | Value |
+--------------------+-------+
| wsrep_cluster_size | 3 |
+--------------------+-------+
1 row in set (0.00 sec)
MariaDB [(none)]> show status like 'wsrep_cluster_state_uuid';
+--------------------------+--------------------------------------+
| Variable_name | Value |
+--------------------------+--------------------------------------+
| wsrep_cluster_state_uuid | 353e129f-11f2-11eb-b3f7-76f39b7b455d |
+--------------------------+--------------------------------------+
1 row in set (0.00 sec)
MariaDB [(none)]> show status like 'wsrep_local_state_comment';
+---------------------------+--------+
| Variable_name | Value |
+---------------------------+--------+
| wsrep_local_state_comment | Synced |
+---------------------------+--------+
1 row in set (0.01 sec)
juju ssh <workloadmgr unit name>/<unit-number>
systemctl restart wlm-api wlm-scheduler wlm-workloads wlm-cron
juju ssh <workloadmgr unit name>/<unit-number>
systemctl restart wlm-api wlm-scheduler wlm-workloads
juju ssh <workloadmgr unit name>/<unit-number>
crm_resource --restart -r res_trilio_wlm_wlm_cron
docker restart trilio_dmapi
podman restart trilio_dmapi
juju ssh <trilio-dm-api unit name>/<unit-number>
sudo systemctl restart tvault-datamover-api
docker restart triliovault_datamover_api
lxc-stop -n <dmapi container name>
lxc-start -n <dmapi container name>
docker restart trilio_datamover
podman restart trilio_datamover
juju ssh <trilio-data-mover unit name>/<unit-number>
sudo systemctl restart tvault-contego
docker restart triliovault_datamover
service tvault-contego restart
{
"mount":{
"mount_vm_id":"15185195-cd8d-4f6f-95ca-25983a34ed92",
"options":{
}
}
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 11 Nov 2020 15:44:42 GMT
Content-Type: application/json
Content-Length: 228
Connection: keep-alive
X-Compute-Request-Id: req-04c6ef90-125c-4a36-9603-af1af001006a
{
"mounted_snapshots":[
{
"snapshot_id":"ed4f29e8-7544-4e1c-af8a-a76031211926",
"snapshot_name":"snapshot",
"workload_id":"4bafaa03-f69a-45d5-a6fc-ae0119c77974",
"mounturl":"[\"http://192.168.100.87\"]",
"status":"mounted"
}
]
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 11 Nov 2020 15:44:42 GMT
Content-Type: application/json
Content-Length: 228
Connection: keep-alive
X-Compute-Request-Id: req-04c6ef90-125c-4a36-9603-af1af001006a
{
"mounted_snapshots":[
{
"snapshot_id":"ed4f29e8-7544-4e1c-af8a-a76031211926",
"snapshot_name":"snapshot",
"workload_id":"4bafaa03-f69a-45d5-a6fc-ae0119c77974",
"mounturl":"[\"http://192.168.100.87\"]",
"status":"mounted"
}
]
}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 11 Nov 2020 16:03:49 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-abf69be3-474d-4cf3-ab41-caa56bb611e4
{
"mount":
{
"options": null
}
}
cd /home/stack
git clone -b hotfix-13-TVO/4.1 https://github.com/trilioData/triliovault-cfg-scripts.git
cd triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/
cp s3-cert.pem /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/puppet/trilio/files/
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/scripts/
chmod +x *.sh
./upload_puppet_module.sh
## Output of above command looks like following.
Creating tarball...
Tarball created.
Creating heat environment file: /home/stack/.tripleo/environments/puppet-modules-url.yaml
Uploading file to swift: /tmp/puppet-modules-8Qjya2X/puppet-modules.tar.gz
+-----------------------+---------------------+----------------------------------+
| object | container | etag |
+-----------------------+---------------------+----------------------------------+
| puppet-modules.tar.gz | overcloud-artifacts | 368951f6a4d39cfe53b5781797b133ad |
+-----------------------+---------------------+----------------------------------+
## Above command creates following file.
ls -ll /home/stack/.tripleo/environments/puppet-modules-url.yaml
'OS::TripleO::Services::TrilioDatamoverApi'
'OS::TripleO::Services::TrilioDatamover'
Trilio Datamover container: docker.io/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION>
Trilio Datamover Api Container: docker.io/trilio/tripleo-train-centos7-trilio-datamover-api:<HOTFIX-TAG-VERSION>
Trilio horizon plugin: docker.io/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>
Trilio Datamover container: docker.io/trilio/tripleo-train-centos8-trilio-datamover:<HOTFIX-TAG-VERSION>
Trilio Datamover Api Container: docker.io/trilio/tripleo-train-centos8-trilio-datamover-api:<HOTFIX-TAG-VERSION>
Trilio horizon plugin: docker.io/trilio/tripleo-train-centos8-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>
# For TripleO Train Centos7
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: docker.io/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION>
DockerTrilioDmApiImage: docker.io/trilio/tripleo-train-centos7-trilio-datamover-api:<HOTFIX-TAG-VERSION>
DockerHorizonImage: docker.io/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>
# For Tripleo Train Centos8
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: docker.io/trilio/tripleo-train-centos8-trilio-datamover:<HOTFIX-TAG-VERSION>
DockerTrilioDmApiImage: docker.io/trilio/tripleo-train-centos8-trilio-datamover-api:<HOTFIX-TAG-VERSION>
ContainerHorizonImage: docker.io/trilio/tripleo-train-centos8-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/scripts/
sudo ./prepare_trilio_images.sh <undercloud_registry_hostname_or_ip> <OS_platform> <4.1-TRIPLEO-CONTAINER> <container_tool_available_on_undercloud>
## To get undercloud registry hostname/ip, we have two approaches. Use either one.
1. openstack tripleo container image list
2. find your 'containers-prepare-parameter.yaml' (from overcloud deploy command) and search for 'push_destination'
cat /home/stack/containers-prepare-parameter.yaml | grep push_destination
- push_destination: "undercloud.ctlplane.ooo.prod1:8787"
Here, 'undercloud.ctlplane.ooo.prod1' is the undercloud registry hostname. Use it in the command as in the following example.
# Command Example:
sudo ./prepare_trilio_images.sh undercloud.ctlplane.ooo.prod1 centos7 4.1.94-hotfix-1-tripleo podman
## Verify changes
# For TripleO Train Centos7
$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: prod1-undercloud.demo:8787/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION>
DockerTrilioDmApiImage: prod1-undercloud.demo:8787/trilio/tripleo-train-centos7-trilio-datamover-api:<HOTFIX-TAG-VERSION>
DockerHorizonImage: prod1-undercloud.demo:8787/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>
# For Tripleo Train Centos8
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: prod1-undercloud.demo:8787/trilio/tripleo-train-centos8-trilio-datamover:<HOTFIX-TAG-VERSION>
DockerTrilioDmApiImage: prod1-undercloud.demo:8787/trilio/tripleo-train-centos8-trilio-datamover-api:<HOTFIX-TAG-VERSION>
ContainerHorizonImage: prod1-undercloud.demo:8787/trilio/tripleo-train-centos8-trilio-horizon-plugin:<HOTFIX-TAG-VERSION>
## For Centos7 Train
(undercloud) [stack@undercloud redhat-director-scripts/tripleo-train]$ openstack tripleo container image list | grep trilio
| docker://undercloud.ctlplane.localdomain:8787/trilio/tripleo-train-centos7-trilio-datamover:<HOTFIX-TAG-VERSION> | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/tripleo-train-centos7-trilio-datamover-api:<HOTFIX-TAG-VERSION> | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/tripleo-train-centos7-trilio-horizon-plugin:<HOTFIX-TAG-VERSION> |
-----------------------------------------------------------------------------------------------------
## For Centos8 Train
(undercloud) [stack@undercloud redhat-director-scripts/tripleo-train]$ openstack tripleo container image list | grep trilio
| docker://undercloud.ctlplane.localdomain:8787/trilio/tripleo-train-centos8-trilio-datamover:<HOTFIX-TAG-VERSION> | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/tripleo-train-centos8-trilio-datamover-api:<HOTFIX-TAG-VERSION> | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/tripleo-train-centos8-trilio-horizon-plugin:<HOTFIX-TAG-VERSION> |
openstack overcloud deploy --templates \
-e /home/stack/templates/node-info.yaml \
-e /home/stack/templates/overcloud_images.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/environments/trilio_env.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/enable-tls.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/inject-trust-anchor.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/tripleo-train/environments/trilio_env_tls_endpoints_public_dns.yaml \
--ntp-server 192.168.1.34 \
--libvirt-type qemu \
--log-file overcloud_deploy.log \
-r /usr/share/openstack-tripleo-heat-templates/roles_data.yaml
[root@overcloud-controller-0 heat-admin]# podman ps | grep trilio
26fcb9194566 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION> kolla_start 5 days ago Up 5 days ago trilio_dmapi
094971d0f5a9 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION> kolla_start 5 days ago Up 5 days ago horizon
/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg
[root@overcloud-novacompute-0 heat-admin]# podman ps | grep trilio
b1840444cc59 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION> kolla_start 5 days ago Up 5 days ago trilio_datamover
[root@overcloud-controller-0 heat-admin]# podman ps | grep horizon
094971d0f5a9 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION> kolla_start 5 days ago Up 5 days ago horizon
workloadmgr snapshot-list [--workload_id <workload_id>]
[--tvault_node <host>]
[--date_from <date_from>]
[--date_to <date_to>]
[--all {True,False}]
workloadmgr workload-snapshot [--full] [--display-name <display-name>]
[--display-description <display-description>]
<workload_id>
workloadmgr snapshot-show [--output <output>] <snapshot_id>
workloadmgr snapshot-delete <snapshot_id>
workloadmgr snapshot-cancel <snapshot_id>
systemctl | grep wlm
wlm-api.service loaded active running workloadmanager api service
wlm-cron.service loaded active running Cluster Controlled wlm-cron
wlm-scheduler.service loaded active running Cluster Controlled wlm-scheduler
wlm-workloads.service loaded active running workloadmanager workloads service
systemctl status wlm-api
######
● wlm-api.service - workloadmanager api service
Loaded: loaded (/etc/systemd/system/wlm-api.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2021-11-02 19:41:19 UTC; 2 months 21 days ago
Main PID: 4688 (workloadmgr-api)
CGroup: /system.slice/wlm-api.service
├─4688 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-api --config-file=/etc/workloadmgr/workloadmgr.conf
systemctl status wlm-scheduler
######
● wlm-scheduler.service - Cluster Controlled wlm-scheduler
Loaded: loaded (/etc/systemd/system/wlm-scheduler.service; disabled; vendor preset: disabled)
Drop-In: /run/systemd/system/wlm-scheduler.service.d
└─50-pacemaker.conf
Active: active (running) since Sat 2022-01-22 13:49:28 UTC; 1 day 23h ago
Main PID: 9342 (workloadmgr-sch)
CGroup: /system.slice/wlm-scheduler.service
└─9342 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-scheduler --config-file=/etc/workloadmgr/workloadmgr.conf
systemctl status wlm-workloads
######
● wlm-workloads.service - workloadmanager workloads service
Loaded: loaded (/etc/systemd/system/wlm-workloads.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2021-11-02 19:51:05 UTC; 2 months 21 days ago
Main PID: 606 (workloadmgr-wor)
CGroup: /system.slice/wlm-workloads.service
├─ 606 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-workloads --config-file=/etc/workloadmgr/workloadmgr.conf
systemctl status wlm-cron
######
● wlm-cron.service - Cluster Controlled wlm-cron
Loaded: loaded (/etc/systemd/system/wlm-cron.service; disabled; vendor preset: disabled)
Drop-In: /run/systemd/system/wlm-cron.service.d
└─50-pacemaker.conf
Active: active (running) since Sat 2022-01-22 13:49:28 UTC; 1 day 23h ago
Main PID: 9209 (workloadmgr-cro)
CGroup: /system.slice/wlm-cron.service
├─9209 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-cron --config-file=/etc/workloadmgr/workloadmgr.conf
pcs status
######
Cluster name: triliovault
WARNINGS:
Corosync and pacemaker node names do not match (IPs used in setup?)
Stack: corosync
Current DC: TVM1 (version 1.1.21-4.el7-f14e36fd43) - partition with quorum
Last updated: Mon Jan 24 13:42:01 2022
Last change: Tue Nov 2 19:07:04 2021 by root via crm_resource on TVM2
3 nodes configured
9 resources configured
Online: [ TVM1 TVM2 TVM3 ]
Full list of resources:
virtual_ip (ocf::heartbeat:IPaddr2): Started TVM2
virtual_ip_public (ocf::heartbeat:IPaddr2): Started TVM2
virtual_ip_admin (ocf::heartbeat:IPaddr2): Started TVM2
virtual_ip_internal (ocf::heartbeat:IPaddr2): Started TVM2
wlm-cron (systemd:wlm-cron): Started TVM2
wlm-scheduler (systemd:wlm-scheduler): Started TVM2
Clone Set: lb_nginx-clone [lb_nginx]
Started: [ TVM2 ]
Stopped: [ TVM1 TVM3 ]
Daemon Status:
corosync: active/enabled
pacemaker: active/enabled
pcsd: active/enabled
curl http://10.10.2.34:8780/v1/8e16700ae3614da4ba80a4e57d60cdb9/workload_types/detail -X GET -H "X-Auth-Project-Id: admin" -H "User-Agent: python-workloadmgrclient" -H "Accept: application/json" -H "X-Auth-Token: gAAAAABe40NVFEtJeePpk1F9QGGh1LiGnHJVLlgZx9t0HRrK9rC5vqKZJRkpAcW1oPH6Q9K9peuHiQrBHEs1-g75Na4xOEESR0LmQJUZP6n37fLfDL_D-hlnjHJZ68iNisIP1fkm9FGSyoyt6IqjO9E7_YVRCTCqNLJ67ZkqHuJh1CXwShvjvjw
root@ansible:~# openstack endpoint list | grep dmapi
| 190db2ce033e44f89de73abcbf12804e | US-WEST-2 | dmapi | datamover | True | public | https://osa-victoria-ubuntu20-2.triliodata.demo:8784/v2 |
| dec1a323791b49f0ac7901a2dc806ee2 | US-WEST-2 | dmapi | datamover | True | admin | http://10.10.10.154:8784/v2 |
| f8c4162c9c1246ffb0190d0d093c48af | US-WEST-2 | dmapi | datamover | True | internal | http://10.10.10.154:8784/v2 |
root@ansible:~# curl http://10.10.10.154:8784
{"versions": [{"id": "v2.0", "status": "SUPPORTED", "version": "", "min_version": "", "updated": "2011-01-21T11:33:21Z", "links": [{"rel": "self", "href": "hlxc-attach -n <dmapi-container-name> (go to dmapi conatiner)
root@controller-dmapi-container-08df1e06:~# systemctl status tvault-datamover-api.service
● tvault-datamover-api.service - TrilioData DataMover API service
Loaded: loaded (/lib/systemd/system/tvault-datamover-api.service; enabled; vendor preset: enabled)
Active: active (running) since Wed 2022-01-12 11:53:39 UTC; 1 day 17h ago
Main PID: 23888 (dmapi-api)
Tasks: 289 (limit: 57729)
Memory: 607.7M
CGroup: /system.slice/tvault-datamover-api.service
├─23888 /usr/bin/python3 /usr/bin/dmapi-api
├─23893 /usr/bin/python3 /usr/bin/dmapi-api
├─23894 /usr/bin/python3 /usr/bin/dmapi-api
├─23895 /usr/bin/python3 /usr/bin/dmapi-api
├─23896 /usr/bin/python3 /usr/bin/dmapi-api
├─23897 /usr/bin/python3 /usr/bin/dmapi-api
├─23898 /usr/bin/python3 /usr/bin/dmapi-api
├─23899 /usr/bin/python3 /usr/bin/dmapi-api
├─23900 /usr/bin/python3 /usr/bin/dmapi-api
├─23901 /usr/bin/python3 /usr/bin/dmapi-api
├─23902 /usr/bin/python3 /usr/bin/dmapi-api
├─23903 /usr/bin/python3 /usr/bin/dmapi-api
├─23904 /usr/bin/python3 /usr/bin/dmapi-api
├─23905 /usr/bin/python3 /usr/bin/dmapi-api
├─23906 /usr/bin/python3 /usr/bin/dmapi-api
├─23907 /usr/bin/python3 /usr/bin/dmapi-api
└─23908 /usr/bin/python3 /usr/bin/dmapi-api
Jan 12 11:53:39 controller-dmapi-container-08df1e06 systemd[1]: Started TrilioData DataMover API service.
Jan 12 11:53:40 controller-dmapi-container-08df1e06 dmapi-api[23888]: Could not load
Jan 12 11:53:40 controller-dmapi-container-08df1e06 dmapi-api[23888]: Could not load
root@compute:~# systemctl status tvault-contego
● tvault-contego.service - Tvault contego
Loaded: loaded (/etc/systemd/system/tvault-contego.service; enabled; vendor preset: enabled)
Active: active (running) since Fri 2022-01-14 05:45:19 UTC; 2s ago
Main PID: 1489651 (python3)
Tasks: 19 (limit: 67404)
Memory: 6.7G (max: 10.0G)
CGroup: /system.slice/tvault-contego.service
├─ 998543 /bin/qemu-nbd -c /dev/nbd45 --object secret,id=sec0,data=payload-1234 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,file>
├─ 998772 /bin/qemu-nbd -c /dev/nbd73 --object secret,id=sec0,data=payload-1234 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,file>
├─ 998931 /bin/qemu-nbd -c /dev/nbd100 --object secret,id=sec0,data=payload-1234 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,fil>
├─ 999147 /bin/qemu-nbd -c /dev/nbd35 --object secret,id=sec0,data=payload-1234 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,file>
├─1371322 /bin/qemu-nbd -c /dev/nbd63 --object secret,id=sec0,data=payload-test1 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,fil>
├─1371524 /bin/qemu-nbd -c /dev/nbd91 --object secret,id=sec0,data=payload-test1 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,fil>
└─1489651 /openstack/venvs/nova-22.3.1/bin/python3 /usr/bin/tvault-contego --config-file=/etc/nova/nova.conf --config-file=/etc/tvault-contego/tvault-cont>
Jan 14 05:45:19 compute systemd[1]: Started Tvault contego.
Jan 14 05:45:20 compute sudo[1489653]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/openstack/venvs/nova-22.3.1/bin/nova-rootwrap /etc/nova/rootwrap.conf umou>
Jan 14 05:45:20 compute sudo[1489653]: pam_unix(sudo:session): session opened for user root by (uid=0)
Jan 14 05:45:21 compute python3[1489655]: umount: /var/triliovault-mounts/VHJpbGlvVmF1bHQ=: no mount point specified.
Jan 14 05:45:21 compute sudo[1489653]: pam_unix(sudo:session): session closed for user root
Jan 14 05:45:21 compute tvault-contego[1489651]: 2022-01-14 05:45:21.499 1489651 INFO __main__ [req-48c32a39-38d0-45b9-9852-931e989133c6 - - - - -] CPU Control group m>
Jan 14 05:45:21 compute tvault-contego[1489651]: 2022-01-14 05:45:21.499 1489651 INFO __main__ [req-48c32a39-38d0-45b9-9852-931e989133c6 - - - - -] I/O Control Group m>
lines 1-22/22 (END)

root@ansible:~# openstack endpoint list | grep dmapi
| 190db2ce033e44f89de73abcbf12804e | US-WEST-2 | dmapi | datamover | True | public | https://osa-victoria-ubuntu20-2.triliodata.demo:8784/v2 |
| dec1a323791b49f0ac7901a2dc806ee2 | US-WEST-2 | dmapi | datamover | True | admin | http://10.10.10.154:8784/v2 |
| f8c4162c9c1246ffb0190d0d093c48af | US-WEST-2 | dmapi | datamover | True | internal | http://10.10.10.154:8784/v2 |
root@ansible:~# curl http://10.10.10.154:8784
{"versions": [{"id": "v2.0", "status": "SUPPORTED", "version": "", "min_version": "", "updated": "2011-01-21T11:33:21Z", "links": [{"rel": "self", "href": "h[root@controller ~]# docker ps | grep triliovault_datamover_api
3f979c15cedc trilio/centos-binary-trilio-datamover-api:4.2.50-victoria "dumb-init --single-…" 3 days ago Up 3 days triliovault_datamover_api[root@compute1 ~]# docker ps | grep triliovault_datamover
2f1ece820a59 trilio/centos-binary-trilio-datamover:4.2.50-victoria "dumb-init --single-…" 3 days ago Up 3 days triliovault_datamover[root@controller ~]# docker ps | grep horizon
4a004c786d47 trilio/centos-binary-trilio-horizon-plugin:4.2.50-victoria "dumb-init --single-…" 3 days ago Up 3 days (unhealthy) horizonroot@jujumaas:~# juju status | grep trilio
trilio-data-mover 4.2.51 active 3 trilio-data-mover jujucharms 9 ubuntu
trilio-dm-api 4.2.51 active 1 trilio-dm-api jujucharms 7 ubuntu
trilio-horizon-plugin 4.2.51 active 1 trilio-horizon-plugin jujucharms 6 ubuntu
trilio-wlm 4.2.51 active 1 trilio-wlm jujucharms 9 ubuntu
trilio-data-mover/8 active idle 172.17.1.5 Unit is ready
trilio-data-mover/6 active idle 172.17.1.6 Unit is ready
trilio-data-mover/7* active idle 172.17.1.7 Unit is ready
trilio-horizon-plugin/2* active idle 172.17.1.16 Unit is ready
trilio-dm-api/2* active idle 1/lxd/4 172.17.1.27 8784/tcp Unit is ready
trilio-wlm/2* active idle 7 172.17.1.28 8780/tcp Unit is ready

On rhosp13 OS: docker ps | grep trilio-
On other (rhosp16 onwards/tripleo) : podman ps | grep trilio-
[root@overcloudtrain1-controller-0 heat-admin]# podman ps | grep trilio-
e3530d6f7bec ucqa161.ctlplane.trilio.local:8787/trilio/trilio-datamover-api:4.2.47-rhosp16.1 kolla_start 2 weeks ago Up 2 weeks ago trilio_dmapi
f93f7019f934 ucqa161.ctlplane.trilio.local:8787/trilio/trilio-horizon-plugin:4.2.47-rhosp16.1 kolla_start 2 weeks ago Up 2 weeks ago horizon

On rhosp13 OS: docker ps | grep trilio-
On other (rhosp/tripleo) : podman ps | grep trilio-
[root@overcloudtrain3-novacompute-1 heat-admin]# podman ps | grep trilio-
4419b02e075c undercloud162.ctlplane.trilio.local:8787/trilio/trilio-datamover:dev-osp16.2-1-rhosp16.2 kolla_start 2 days ago Up 27 seconds ago trilio_datamover

(overcloudtrain1) [stack@ucqa161 ~]$ openstack endpoint list | grep datamover
| 218b2f92569a4d259839fa3ea4d6103a | regionOne | dmapi | datamover | True | internal | https://overcloudtrain1internalapi.trilio.local:8784/v2 |
| 4702c51aa5c24bed853e736499e194e2 | regionOne | dmapi | datamover | True | public | https://overcloudtrain1.trilio.local:13784/v2 |
| c8169025eb1e4954ab98c7abdb0f53f6 | regionOne | dmapi | datamover | True | admin | https://overcloudtrain1internalapi.trilio.local:8784/v2 |

systemctl | grep wlm
wlm-api.service loaded active running workloadmanager api service
wlm-cron.service loaded active running Cluster Controlled wlm-cron
wlm-scheduler.service loaded active running Cluster Controlled wlm-scheduler
wlm-workloads.service loaded active running workloadmanager workloads service

systemctl status wlm-api
######
● wlm-api.service - workloadmanager api service
Loaded: loaded (/etc/systemd/system/wlm-api.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2021-11-02 19:41:19 UTC; 2 months 21 days ago
Main PID: 4688 (workloadmgr-api)
CGroup: /system.slice/wlm-api.service
├─4688 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-api --config-file=/etc/workloadmgr/workloadmgr.conf

systemctl status wlm-scheduler
######
● wlm-scheduler.service - Cluster Controlled wlm-scheduler
Loaded: loaded (/etc/systemd/system/wlm-scheduler.service; disabled; vendor preset: disabled)
Drop-In: /run/systemd/system/wlm-scheduler.service.d
└─50-pacemaker.conf
Active: active (running) since Sat 2022-01-22 13:49:28 UTC; 1 day 23h ago
Main PID: 9342 (workloadmgr-sch)
CGroup: /system.slice/wlm-scheduler.service
└─9342 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-scheduler --config-file=/etc/workloadmgr/workloadmgr.conf

systemctl status wlm-workloads
######
● wlm-workloads.service - workloadmanager workloads service
Loaded: loaded (/etc/systemd/system/wlm-workloads.service; enabled; vendor preset: disabled)
Active: active (running) since Tue 2021-11-02 19:51:05 UTC; 2 months 21 days ago
Main PID: 606 (workloadmgr-wor)
CGroup: /system.slice/wlm-workloads.service
├─ 606 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-workloads --config-file=/etc/workloadmgr/workloadmgr.conf

systemctl status wlm-cron
######
● wlm-cron.service - Cluster Controlled wlm-cron
Loaded: loaded (/etc/systemd/system/wlm-cron.service; disabled; vendor preset: disabled)
Drop-In: /run/systemd/system/wlm-cron.service.d
└─50-pacemaker.conf
Active: active (running) since Sat 2022-01-22 13:49:28 UTC; 1 day 23h ago
Main PID: 9209 (workloadmgr-cro)
CGroup: /system.slice/wlm-cron.service
├─9209 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-cron --config-file=/etc/workloadmgr/workloadmgr.conf

pcs status
######
Cluster name: triliovault
WARNINGS:
Corosync and pacemaker node names do not match (IPs used in setup?)
Stack: corosync
Current DC: TVM1 (version 1.1.21-4.el7-f14e36fd43) - partition with quorum
Last updated: Mon Jan 24 13:42:01 2022
Last change: Tue Nov 2 19:07:04 2021 by root via crm_resource on TVM2
3 nodes configured
9 resources configured
Online: [ TVM1 TVM2 TVM3 ]
Full list of resources:
virtual_ip (ocf::heartbeat:IPaddr2): Started TVM2
virtual_ip_public (ocf::heartbeat:IPaddr2): Started TVM2
virtual_ip_admin (ocf::heartbeat:IPaddr2): Started TVM2
virtual_ip_internal (ocf::heartbeat:IPaddr2): Started TVM2
wlm-cron (systemd:wlm-cron): Started TVM2
wlm-scheduler (systemd:wlm-scheduler): Started TVM2
Clone Set: lb_nginx-clone [lb_nginx]
Started: [ TVM2 ]
Stopped: [ TVM1 TVM3 ]
Daemon Status:
corosync: active/enabled
pacemaker: active/enabled
pcsd: active/enabled

curl http://10.10.2.34:8780/v1/8e16700ae3614da4ba80a4e57d60cdb9/workload_types/detail -X GET -H "X-Auth-Project-Id: admin" -H "User-Agent: python-workloadmgrclient" -H "Accept: application/json" -H "X-Auth-Token: gAAAAABe40NVFEtJeePpk1F9QGGh1LiGnHJVLlgZx9t0HRrK9rC5vqKZJRkpAcW1oPH6Q9K9peuHiQrBHEs1-g75Na4xOEESR0LmQJUZP6n37fLfDL_D-hlnjHJZ68iNisIP1fkm9FGSyoyt6IqjO9E7_YVRCTCqNLJ67ZkqHuJh1CXwShvjvjw

root@ansible:~# openstack endpoint list | grep dmapi
| 190db2ce033e44f89de73abcbf12804e | US-WEST-2 | dmapi | datamover | True | public | https://osa-victoria-ubuntu20-2.triliodata.demo:8784/v2 |
| dec1a323791b49f0ac7901a2dc806ee2 | US-WEST-2 | dmapi | datamover | True | admin | http://10.10.10.154:8784/v2 |
| f8c4162c9c1246ffb0190d0d093c48af | US-WEST-2 | dmapi | datamover | True | internal | http://10.10.10.154:8784/v2 |
root@ansible:~# curl http://10.10.10.154:8784
{"versions": [{"id": "v2.0", "status": "SUPPORTED", "version": "", "min_version": "", "updated": "2011-01-21T11:33:21Z", "links": [{"rel": "self", "href": "hlxc-attach -n <dmapi-container-name> (go to dmapi conatiner)
root@controller-dmapi-container-08df1e06:~# systemctl status tvault-datamover-api.service
● tvault-datamover-api.service - TrilioData DataMover API service
Loaded: loaded (/lib/systemd/system/tvault-datamover-api.service; enabled; vendor preset: enabled)
Active: active (running) since Wed 2022-01-12 11:53:39 UTC; 1 day 17h ago
Main PID: 23888 (dmapi-api)
Tasks: 289 (limit: 57729)
Memory: 607.7M
CGroup: /system.slice/tvault-datamover-api.service
├─23888 /usr/bin/python3 /usr/bin/dmapi-api
├─23893 /usr/bin/python3 /usr/bin/dmapi-api
├─23894 /usr/bin/python3 /usr/bin/dmapi-api
├─23895 /usr/bin/python3 /usr/bin/dmapi-api
├─23896 /usr/bin/python3 /usr/bin/dmapi-api
├─23897 /usr/bin/python3 /usr/bin/dmapi-api
├─23898 /usr/bin/python3 /usr/bin/dmapi-api
├─23899 /usr/bin/python3 /usr/bin/dmapi-api
├─23900 /usr/bin/python3 /usr/bin/dmapi-api
├─23901 /usr/bin/python3 /usr/bin/dmapi-api
├─23902 /usr/bin/python3 /usr/bin/dmapi-api
├─23903 /usr/bin/python3 /usr/bin/dmapi-api
├─23904 /usr/bin/python3 /usr/bin/dmapi-api
├─23905 /usr/bin/python3 /usr/bin/dmapi-api
├─23906 /usr/bin/python3 /usr/bin/dmapi-api
├─23907 /usr/bin/python3 /usr/bin/dmapi-api
└─23908 /usr/bin/python3 /usr/bin/dmapi-api
Jan 12 11:53:39 controller-dmapi-container-08df1e06 systemd[1]: Started TrilioData DataMover API service.
Jan 12 11:53:40 controller-dmapi-container-08df1e06 dmapi-api[23888]: Could not load
Jan 12 11:53:40 controller-dmapi-container-08df1e06 dmapi-api[23888]: Could not load
root@compute:~# systemctl status tvault-contego
● tvault-contego.service - Tvault contego
Loaded: loaded (/etc/systemd/system/tvault-contego.service; enabled; vendor preset: enabled)
Active: active (running) since Fri 2022-01-14 05:45:19 UTC; 2s ago
Main PID: 1489651 (python3)
Tasks: 19 (limit: 67404)
Memory: 6.7G (max: 10.0G)
CGroup: /system.slice/tvault-contego.service
├─ 998543 /bin/qemu-nbd -c /dev/nbd45 --object secret,id=sec0,data=payload-1234 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,file>
├─ 998772 /bin/qemu-nbd -c /dev/nbd73 --object secret,id=sec0,data=payload-1234 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,file>
├─ 998931 /bin/qemu-nbd -c /dev/nbd100 --object secret,id=sec0,data=payload-1234 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,fil>
├─ 999147 /bin/qemu-nbd -c /dev/nbd35 --object secret,id=sec0,data=payload-1234 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,file>
├─1371322 /bin/qemu-nbd -c /dev/nbd63 --object secret,id=sec0,data=payload-test1 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,fil>
├─1371524 /bin/qemu-nbd -c /dev/nbd91 --object secret,id=sec0,data=payload-test1 --image-opts driver=qcow2,encrypt.format=luks,encrypt.key-secret=sec0,fil>
└─1489651 /openstack/venvs/nova-22.3.1/bin/python3 /usr/bin/tvault-contego --config-file=/etc/nova/nova.conf --config-file=/etc/tvault-contego/tvault-cont>
Jan 14 05:45:19 compute systemd[1]: Started Tvault contego.
Jan 14 05:45:20 compute sudo[1489653]: nova : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/openstack/venvs/nova-22.3.1/bin/nova-rootwrap /etc/nova/rootwrap.conf umou>
Jan 14 05:45:20 compute sudo[1489653]: pam_unix(sudo:session): session opened for user root by (uid=0)
Jan 14 05:45:21 compute python3[1489655]: umount: /var/triliovault-mounts/VHJpbGlvVmF1bHQ=: no mount point specified.
Jan 14 05:45:21 compute sudo[1489653]: pam_unix(sudo:session): session closed for user root
Jan 14 05:45:21 compute tvault-contego[1489651]: 2022-01-14 05:45:21.499 1489651 INFO __main__ [req-48c32a39-38d0-45b9-9852-931e989133c6 - - - - -] CPU Control group m>
Jan 14 05:45:21 compute tvault-contego[1489651]: 2022-01-14 05:45:21.499 1489651 INFO __main__ [req-48c32a39-38d0-45b9-9852-931e989133c6 - - - - -] I/O Control Group m>
lines 1-22/22 (END)

root@ansible:~# openstack endpoint list | grep dmapi
| 190db2ce033e44f89de73abcbf12804e | US-WEST-2 | dmapi | datamover | True | public | https://osa-victoria-ubuntu20-2.triliodata.demo:8784/v2 |
| dec1a323791b49f0ac7901a2dc806ee2 | US-WEST-2 | dmapi | datamover | True | admin | http://10.10.10.154:8784/v2 |
| f8c4162c9c1246ffb0190d0d093c48af | US-WEST-2 | dmapi | datamover | True | internal | http://10.10.10.154:8784/v2 |
root@ansible:~# curl http://10.10.10.154:8784
{"versions": [{"id": "v2.0", "status": "SUPPORTED", "version": "", "min_version": "", "updated": "2011-01-21T11:33:21Z", "links": [{"rel": "self", "href": "h[root@controller ~]# docker ps | grep triliovault_datamover_api
3f979c15cedc trilio/centos-binary-trilio-datamover-api:4.2.50-victoria "dumb-init --single-…" 3 days ago Up 3 days triliovault_datamover_api[root@compute1 ~]# docker ps | grep triliovault_datamover
2f1ece820a59 trilio/centos-binary-trilio-datamover:4.2.50-victoria "dumb-init --single-…" 3 days ago Up 3 days triliovault_datamover[root@controller ~]# docker ps | grep horizon
4a004c786d47 trilio/centos-binary-trilio-horizon-plugin:4.2.50-victoria "dumb-init --single-…" 3 days ago Up 3 days (unhealthy) horizonroot@jujumaas:~# juju status | grep trilio
trilio-data-mover 4.2.51 active 3 trilio-data-mover jujucharms 9 ubuntu
trilio-dm-api 4.2.51 active 1 trilio-dm-api jujucharms 7 ubuntu
trilio-horizon-plugin 4.2.51 active 1 trilio-horizon-plugin jujucharms 6 ubuntu
trilio-wlm 4.2.51 active 1 trilio-wlm jujucharms 9 ubuntu
trilio-data-mover/8 active idle 172.17.1.5 Unit is ready
trilio-data-mover/6 active idle 172.17.1.6 Unit is ready
trilio-data-mover/7* active idle 172.17.1.7 Unit is ready
trilio-horizon-plugin/2* active idle 172.17.1.16 Unit is ready
trilio-dm-api/2* active idle 1/lxd/4 172.17.1.27 8784/tcp Unit is ready
trilio-wlm/2* active idle 7 172.17.1.28 8780/tcp Unit is ready

On rhosp13 OS: docker ps | grep trilio-
On other (rhosp16 onwards/tripleo) : podman ps | grep trilio-
[root@overcloudtrain1-controller-0 heat-admin]# podman ps | grep trilio-
e3530d6f7bec ucqa161.ctlplane.trilio.local:8787/trilio/trilio-datamover-api:4.2.47-rhosp16.1 kolla_start 2 weeks ago Up 2 weeks ago trilio_dmapi
f93f7019f934 ucqa161.ctlplane.trilio.local:8787/trilio/trilio-horizon-plugin:4.2.47-rhosp16.1 kolla_start 2 weeks ago Up 2 weeks ago horizon

On rhosp13 OS: docker ps | grep trilio-
On other (rhosp/tripleo) : podman ps | grep trilio-
[root@overcloudtrain3-novacompute-1 heat-admin]# podman ps | grep trilio-
4419b02e075c undercloud162.ctlplane.trilio.local:8787/trilio/trilio-datamover:dev-osp16.2-1-rhosp16.2 kolla_start 2 days ago Up 27 seconds ago trilio_datamover

(overcloudtrain1) [stack@ucqa161 ~]$ openstack endpoint list | grep datamover
| 218b2f92569a4d259839fa3ea4d6103a | regionOne | dmapi | datamover | True | internal | https://overcloudtrain1internalapi.trilio.local:8784/v2 |
| 4702c51aa5c24bed853e736499e194e2 | regionOne | dmapi | datamover | True | public | https://overcloudtrain1.trilio.local:13784/v2 |
| c8169025eb1e4954ab98c7abdb0f53f6 | regionOne | dmapi | datamover | True | admin | https://overcloudtrain1internalapi.trilio.local:8784/v2 |

RHOSP/TripleO: /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
Kolla Ansible with CentOS: /etc/pki/tls/certs/ca-bundle.crt
Kolla Ansible with Ubuntu: /usr/local/share/ca-certificates/
OpenStack Ansible (OSA) with Ubuntu in our lab: /etc/openstack_deploy/ssl/
OpenStack Ansible (OSA) with CentOS: /etc/openstack_deploy/ssl/

/etc/workloadmgr/ca-chain.pem

create database workloadmgr_auto;
CREATE USER 'trilio'@'localhost' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON workloadmgr_auto.* TO 'trilio'@'10.10.10.67' IDENTIFIED BY 'password';

mysql://trilio:password@10.10.10.67/workloadmgr_auto?charset=utf8
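For reference, here is a minimal sketch of the same database setup as a single mysql invocation, assuming the WLM node connects from 10.10.10.67 with the password used in the connection string above (both values are placeholders to adjust; the IF NOT EXISTS syntax needs MariaDB 10.1+/MySQL 5.7+):

mysql -u root -p <<'SQL'
-- database name taken from the snippet above
CREATE DATABASE IF NOT EXISTS workloadmgr_auto;
-- allow the trilio user to connect locally and from the WLM node
CREATE USER IF NOT EXISTS 'trilio'@'localhost' IDENTIFIED BY 'password';
CREATE USER IF NOT EXISTS 'trilio'@'10.10.10.67' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON workloadmgr_auto.* TO 'trilio'@'localhost';
GRANT ALL PRIVILEGES ON workloadmgr_auto.* TO 'trilio'@'10.10.10.67';
FLUSH PRIVILEGES;
SQL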
[root@controller ~]# wget http://10.10.2.15:8085/yum-repo/queens/workloadmgrclient-4.0.115-4.0.noarch.rpm
--2021-03-08 15:36:37-- http://10.10.2.15:8085/yum-repo/queens/workloadmgrclient-4.0.115-4.0.noarch.rpm
Connecting to 10.10.2.15:8085... connected.
HTTP request sent, awaiting response... 200 OK
Length: 155976 (152K) [application/x-rpm]
Saving to: ‘workloadmgrclient-4.0.115-4.0.noarch.rpm’
100%[======================================>] 1,55,976 --.-K/s in 0.001s
2021-03-08 15:36:37 (125 MB/s) - ‘workloadmgrclient-4.0.115-4.0.noarch.rpm’ saved [155976/155976]
[root@controller ~]# yum install workloadmgrclient-4.0.115-4.0.noarch.rpm
Loaded plugins: fastestmirror
Examining workloadmgrclient-4.0.115-4.0.noarch.rpm: workloadmgrclient-4.0.115-4. 0.noarch
Marking workloadmgrclient-4.0.115-4.0.noarch.rpm to be installed
Resolving Dependencies
--> Running transaction check
---> Package workloadmgrclient.noarch 0:4.0.115-4.0 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
Package Arch Version Repository Size
================================================================================
Installing:
workloadmgrclient
noarch 4.0.115-4.0 /workloadmgrclient-4.0.115-4.0.noarch 700 k
Transaction Summary
================================================================================
Install 1 Package
Total size: 700 k
Installed size: 700 k
Is this ok [y/d/N]: y
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : workloadmgrclient-4.0.115-4.0.noarch 1/1
Verifying : workloadmgrclient-4.0.115-4.0.noarch 1/1
Installed:
workloadmgrclient.noarch 0:4.0.115-4.0
Complete!

[trilio]
name=Trilio Repository
baseurl=http://trilio:[email protected]:8283/triliovault-<Trilio-Release>/yum/
enabled=1
gpgcheck=0

[root@controller ~]# cat /etc/yum.repos.d/trilio.repo
[trilio]
name=Trilio Repository
baseurl=http://trilio:[email protected]:8283/triliovault-4.0/yum/
enabled=1
gpgcheck=0
[root@controller ~]# yum install workloadmgrclient
Loaded plugins: fastestmirror
Determining fastest mirrors
* base: centos-canada.vdssunucu.com.tr
* centos-ceph-nautilus: mirror.its.dal.ca
* centos-nfs-ganesha28: centos.mirror.colo-serv.net
* centos-openstack-train: centos-canada.vdssunucu.com.tr
* centos-qemu-ev: centos-canada.vdssunucu.com.tr
* extras: centos-canada.vdssunucu.com.tr
* updates: centos-canada.vdssunucu.com.tr
base | 3.6 kB 00:00:00
centos-ceph-nautilus | 3.0 kB 00:00:00
centos-nfs-ganesha28 | 3.0 kB 00:00:00
centos-openstack-train | 3.0 kB 00:00:00
centos-qemu-ev | 3.0 kB 00:00:00
extras | 2.9 kB 00:00:00
trilio | 2.9 kB 00:00:00
updates | 2.9 kB 00:00:00
(1/3): extras/7/x86_64/primary_db | 225 kB 00:00:00
(2/3): centos-openstack-train/7/x86_64/primary_db | 1.1 MB 00:00:00
(3/3): updates/7/x86_64/primary_db | 5.7 MB 00:00:00
Resolving Dependencies
--> Running transaction check
---> Package workloadmgrclient.noarch 0:4.0.116-4.0 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
===========================================================================================================================================================================================================================================================================================================================================================================================================================================
Package Arch Version Repository Size
===========================================================================================================================================================================================================================================================================================================================================================================================================================================
Installing:
workloadmgrclient noarch 4.0.116-4.0 trilio 152 k
Transaction Summary
===========================================================================================================================================================================================================================================================================================================================================================================================================================================
Install 1 Package
Total download size: 152 k
Installed size: 700 k
Is this ok [y/d/N]: y
Downloading packages:
workloadmgrclient-4.0.116-4.0.noarch.rpm | 152 kB 00:00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : workloadmgrclient-4.0.116-4.0.noarch 1/1
Verifying : workloadmgrclient-4.0.116-4.0.noarch 1/1
Installed:
workloadmgrclient.noarch 0:4.0.116-4.0
Complete!

root@ubuntu:~# curl -Og6 http://10.10.2.15:8085/deb-repo/deb-repo/python3-workloadmgrclient_4.0.115_all.deb
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 116k 100 116k 0 0 899k 0 --:--:-- --:--:-- --:--:-- 982k
root@ubuntu:~# apt-get install ./python3-workloadmgrclient_4.0.115_all.deb -y
Reading package lists... Done
Building dependency tree
Reading state information... Done
Note, selecting 'python3-workloadmgrclient' instead of './python3-workloadmgrclient_4.0.115_all.deb'
The following NEW packages will be installed:
python3-workloadmgrclient
0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded.
Need to get 0 B/120 kB of archives.
After this operation, 736 kB of additional disk space will be used.
Selecting previously unselected package python3-workloadmgrclient.
(Reading database ... 65533 files and directories currently installed.)
Preparing to unpack .../python3-workloadmgrclient_4.0.115_all.deb ...
Unpacking python3-workloadmgrclient (4.0.115) ...
Setting up python3-workloadmgrclient (4.0.115) ...

deb [trusted=yes] https://apt.fury.io/triliodata-<Trilio-Version>/ /

root@ubuntu:~# cat /etc/apt/sources.list.d/fury.list
deb [trusted=yes] https://apt.fury.io/triliodata-4-0/ /
root@ubuntu:~# apt update
Hit:1 http://security.ubuntu.com/ubuntu bionic-security InRelease
Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease
Hit:3 http://archive.ubuntu.com/ubuntu bionic-updates InRelease
Hit:4 http://archive.ubuntu.com/ubuntu bionic-backports InRelease
Ign:5 https://apt.fury.io/triliodata-4-0 InRelease
Ign:6 https://apt.fury.io/triliodata-4-0 Release
Ign:7 https://apt.fury.io/triliodata-4-0 Packages
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Get:7 https://apt.fury.io/triliodata-4-0 Packages
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Ign:8 https://apt.fury.io/triliodata-4-0 Translation-en
Fetched 84.0 kB in 12s (6930 B/s)
Reading package lists... Done
Building dependency tree
Reading state information... Done
All packages are up to date.
root@ubuntu:~# apt-get install python3-workloadmgrclient
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following NEW packages will be installed:
python3-workloadmgrclient
0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded.
Need to get 0 B/120 kB of archives.
After this operation, 736 kB of additional disk space will be used.
Selecting previously unselected package python3-workloadmgrclient.
(Reading database ... 65533 files and directories currently installed.)
Preparing to unpack .../python3-workloadmgrclient_4.0.115_all.deb ...
Unpacking python3-workloadmgrclient (4.0.115) ...
Setting up python3-workloadmgrclient (4.0.115) ...--workload-type-id➡️Workload Type ID is required--instance <instance-id=instance-uuid>➡️Specify an instance to include in the workload. Specify option multiple times to include multiple instances. instance-id: include the instance with this UUIDnova user and group ids have changed to '42436'workloadmgr workload-list [--all {True,False}] [--nfsshare <nfsshare>]workloadmgr workload-create --instance <instance-id=instance-uuid>
[--display-name <display-name>]
[--display-description <display-description>]
[--workload-type-id <workload-type-id>]
[--source-platform <source-platform>]
[--jobschedule <key=key-name>]
[--metadata <key=key-name>]
[--policy-id <policy_id>]

workloadmgr workload-show <workload_id> [--verbose <verbose>]

usage: workloadmgr workload-modify [--display-name <display-name>]
[--display-description <display-description>]
[--instance <instance-id=instance-uuid>]
[--jobschedule <key=key-name>]
[--metadata <key=key-name>]
[--policy-id <policy_id>]
<workload_id>

workloadmgr workload-delete [--database_only <True/False>] <workload_id>

workloadmgr workload-unlock <workload_id>

workloadmgr workload-reset <workload_id>
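For illustration only (the display name, description, and instance UUIDs below are placeholders, not values from this environment), a workload containing two instances could be created like this:

workloadmgr workload-create \
  --display-name "demo-workload" \
  --display-description "Example workload protecting two instances" \
  --instance instance-id=11111111-2222-3333-4444-555555555555 \
  --instance instance-id=66666666-7777-8888-9999-000000000000

The returned workload ID can then be passed to workloadmgr workload-show to verify the result.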
pcs resource disable wlm-cron

[root@TVM2 ~]# systemctl status wlm-cron
● wlm-cron.service - workload's scheduler cron service
Loaded: loaded (/etc/systemd/system/wlm-cron.service; disabled; vendor preset : disabled)
Active: inactive (dead)
Jun 11 08:27:06 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:06 - INFO - 1...t
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 140686268624368 Child 11389 ki...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - 1...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Shutting down thread pool
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...l
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Stopping the threads
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...s
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: All threads are stopped succes...y
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - A...y
Jun 11 08:27:09 TVM2 systemd[1]: Stopped workload's scheduler cron service.
Hint: Some lines were ellipsized, use -l to show in full.
[root@TVM2 ~]# pcs resource show wlm-cron
Resource: wlm-cron (class=systemd type=wlm-cron)
Meta Attrs: target-role=Stopped
Operations: monitor interval=30s on-fail=restart timeout=300s (wlm-cron-monito r-interval-30s)
start interval=0s on-fail=restart timeout=300s (wlm-cron-start-int erval-0s)
stop interval=0s timeout=300s (wlm-cron-stop-interval-0s)
[root@TVM2 ~]# ps -ef | grep -i workloadmgr-cron
root     15379 14383  0 08:27 pts/0    00:00:00 grep --color=auto -i workloadmgr

tar -czvf tvault_backup.tar.gz /etc/tvault /etc/tvault-config /etc/workloadmgr
cp tvault_backup.tar.gz /root/

ls -al /home/stack/myansible_3.8
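Optionally, confirm that the backup archive copied to /root above contains the expected configuration directories before continuing (a quick sanity check, not part of the original procedure):

# list the archive contents; expect etc/tvault, etc/tvault-config and etc/workloadmgr entries
tar -tzf /root/tvault_backup.tar.gz | head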
yum-config-manager --disable bintray-rabbitmq-server
yum-config-manager --disable mariadb
yum -y groupinstall "Development Tools"
yum -y install openssl-devel bzip2-devel libffi-devel xz-devel
wget https://www.python.org/ftp/python/3.8.12/Python-3.8.12.tgz
tar xvf Python-3.8.12.tgz
cd Python-3.8*/
./configure --enable-optimizations
sudo make altinstall
# Create the Python3.8 virtual env
cd /home/stack/
virtualenv -p /usr/local/bin/python3.8 myansible_3.8 --system-site-packages
source /home/stack/myansible_3.8/bin/activate
pip3 install pip --upgrade
pip3 install setuptools --upgrade
pip3 install jinja2 "ansible>=2.9.0" configobj pbr

source /home/stack/myansible/bin/activate

pip3 uninstall ansible

pip3 install --upgrade pip

export PIP_EXTRA_INDEX_URL=https://pypi.fury.io/triliodata-4-1/

source /home/stack/myansible/bin/activate
systemctl stop tvault-object-store
pip3 install --extra-index-url $PIP_EXTRA_INDEX_URL s3fuse --upgrade --no-cache-dir
rm -rf /var/triliovault/*

source /home/stack/myansible/bin/activate
systemctl stop tvault-object-store
pip3 install --extra-index-url $PIP_EXTRA_INDEX_URL s3fuse --upgrade --no-cache-dir --no-deps
rm -rf /var/triliovault/*

source /home/stack/myansible_3.8/bin/activate
pip3 install --extra-index-url $PIP_EXTRA_INDEX_URL tvault-configurator --upgrade --no-cache-dir

source /home/stack/myansible_3.8/bin/activate
pip3 install --extra-index-url $PIP_EXTRA_INDEX_URL tvault-configurator --upgrade --no-cache-dir

ERROR: Command errored out with exit status 1:
command: /home/stack/myansible/bin/python3 -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-install-crie5qno/ansible_086eb28a1523443f802ab202398d361e/setup.py'"'"'; __file__='"'"'/tmp/pip-install-crie5qno/ansible_086eb28a1523443f802ab202398d361e/setup.py'"'"';f = getattr(tokenize, '"'"'open'"'"', open)(__file__) if os.path.exists(__file__) else io.StringIO('"'"'from setuptools import setup; setup()'"'"');code = f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' egg_info --egg-base /tmp/pip-pip-egg-info-pdd9x77v
cwd: /tmp/pip-install-crie5qno/ansible_086eb28a1523443f802ab202398d361e/

source /home/stack/myansible/bin/activate
pip3 install --extra-index-url $PIP_EXTRA_INDEX_URL workloadmgr --upgrade --no-cache-dir

source /home/stack/myansible/bin/activate
pip3 install --extra-index-url $PIP_EXTRA_INDEX_URL workloadmgr --upgrade --no-cache-dir --no-deps

source /home/stack/myansible/bin/activate
pip3 install --extra-index-url $PIP_EXTRA_INDEX_URL workloadmgrclient --upgrade --no-cache-dir

source /home/stack/myansible/bin/activate
pip3 install --extra-index-url $PIP_EXTRA_INDEX_URL workloadmgrclient --upgrade --no-cache-dir --no-deps

source /home/stack/myansible/bin/activate
pip3 install --extra-index-url $PIP_EXTRA_INDEX_URL contegoclient --upgrade --no-cache-dir

source /home/stack/myansible/bin/activate
pip3 install --extra-index-url $PIP_EXTRA_INDEX_URL contegoclient --upgrade --no-cache-dir --no-deps

source /home/stack/myansible/bin/activate
pip3 install oslo.messaging==12.1.6 --no-deps

cd /root
tar -xzvf tvault_backup.tar.gz -C /

pcs resource delete wlm-scheduler

systemctl restart tvault-object-store
systemctl restart wlm-api
systemctl restart wlm-scheduler
systemctl restart wlm-workloads
systemctl restart tvault-config

pcs resource enable wlm-cron
pcs resource restart wlm-cron

systemctl status wlm-api wlm-scheduler wlm-workloads tvault-config tvault-object-store | grep -E 'Active|loaded'
pcs status

systemctl status wlm-cron
ps -ef | grep [w]orkloadmgr-cron

[root@tvm6 ~]# ps -ef | grep [w]orkloadmgr-cron
nova 8841 1 2 Jul28 ? 00:40:44 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-cron --config-file=/etc/workloadmgr/workloadmgr.conf
nova      8898  8841  0 Jul28 ?        00:07:03 /home/stack/myansible/bin/python3 /home/stack/myansible/bin/workloadmgr-cron --config-file=/etc/workloadmgr/workloadmgr.conf

mount --bind /var/lib/nova/triliovault-mounts /var/triliovault-mounts

mount --bind /var/trilio/triliovault-mounts /var/triliovault-mounts

echo "/var/lib/nova/triliovault-mounts /var/triliovault-mounts none bind 0 0" >> /etc/fstab

echo "/var/trilio/triliovault-mounts /var/triliovault-mounts none bind 0 0" >> /etc/fstab
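To confirm the bind mount is active and the /etc/fstab entry is in place, a simple check (a sketch using the same paths as the commands above):

# show what is mounted at the legacy mount point
findmnt /var/triliovault-mounts
# confirm the persistent bind-mount entry was appended
grep triliovault-mounts /etc/fstab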
[root@TVM1 ~]# id nova
uid=42436(nova) gid=42436(nova) groups=42436(nova),990(libvirt),36(kvm)

## Download the shell script
$ curl -O https://raw.githubusercontent.com/trilioData/triliovault-cfg-scripts/master/common/nova_userid.sh
## Assign executable permissions
$ chmod +x nova_userid.sh
## Execute the shell script to change 'nova' user and group id to '42436'
$ ./nova_userid.sh
## Ignore any errors and verify that the 'nova' user and group ids have changed to '42436'
$ id nova
uid=42436(nova) gid=42436(nova) groups=42436(nova),990(libvirt),36(kvm)

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 11:52:56 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-99f51825-9b47-41ea-814f-8f8141157fc7

[root@TVM2 ~]# pcs resource disable wlm-cron
[root@TVM2 ~]# systemctl status wlm-cron
● wlm-cron.service - workload's scheduler cron service
Loaded: loaded (/etc/systemd/system/wlm-cron.service; disabled; vendor preset : disabled)
Active: inactive (dead)
Jun 11 08:27:06 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:06 - INFO - 1...t
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 140686268624368 Child 11389 ki...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - 1...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Shutting down thread pool
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...l
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Stopping the threads
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...s
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: All threads are stopped succes...y
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - A...y
Jun 11 08:27:09 TVM2 systemd[1]: Stopped workload's scheduler cron service.
Hint: Some lines were ellipsized, use -l to show in full.
[root@TVM2 ~]# pcs resource show wlm-cron
Resource: wlm-cron (class=systemd type=wlm-cron)
Meta Attrs: target-role=Stopped
Operations: monitor interval=30s on-fail=restart timeout=300s (wlm-cron-monito r-interval-30s)
start interval=0s on-fail=restart timeout=300s (wlm-cron-start-int erval-0s)
stop interval=0s timeout=300s (wlm-cron-stop-interval-0s)
[root@TVM2 ~]# ps -ef | grep -i workloadmgr-cron
root     15379 14383  0 08:27 pts/0    00:00:00 grep --color=auto -i workloadmgr-cron
cd /home/stack
mv triliovault-cfg-scripts triliovault-cfg-scripts-old
git clone -b hotfix-13-TVO/4.1 https://github.com/trilioData/triliovault-cfg-scripts.git
cd triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/

cp s3-cert.pem /home/stack/triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/puppet/trilio/files/

cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/scripts/
./upload_puppet_module.sh
## Output of above command looks like following.
Creating tarball...
Tarball created.
Creating heat environment file: /home/stack/.tripleo/environments/puppet-modules-url.yaml
Uploading file to swift: /tmp/puppet-modules-8Qjya2X/puppet-modules.tar.gz
+-----------------------+---------------------+----------------------------------+
| object | container | etag |
+-----------------------+---------------------+----------------------------------+
| puppet-modules.tar.gz | overcloud-artifacts | 368951f6a4d39cfe53b5781797b133ad |
+-----------------------+---------------------+----------------------------------+
## Above command creates following file.
ls -ll /home/stack/.tripleo/environments/puppet-modules-url.yaml
Trilio Datamover container: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
Trilio Datamover Api Container: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
Trilio horizon plugin: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13

Trilio Datamover container: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
Trilio Datamover Api Container: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
Trilio horizon plugin: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1

Trilio Datamover container: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
Trilio Datamover Api Container: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
Trilio horizon plugin: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2

# For RHOSP13
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
DockerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13
# For RHOSP16.1
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
# For RHOSP16.2
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
ContainerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2

cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp13/scripts/
./prepare_trilio_images.sh <undercloud_ip/hostname> <HOTFIX-TAG-VERSION>-rhosp13
$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: 172.25.2.2:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
DockerTrilioDmApiImage: 172.25.2.2:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
DockerHorizonImage: 172.25.2.2:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13

cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/scripts/
sudo ./prepare_trilio_images.sh <undercloud_ip/hostname> <HOTFIX-TAG-VERSION>-rhosp16.1
$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1

cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.2/scripts/
sudo ./prepare_trilio_images.sh <undercloud_ip/hostname> <HOTFIX-TAG-VERSION>-rhosp16.2
$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
DockerTrilioDmApiImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
ContainerHorizonImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2

(undercloud) [stack@undercloud redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>]$ openstack tripleo container image list | grep trilio
| docker://undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1 | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1 | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1 |
-----------------------------------------------------------------------------------------------------
(undercloud) [stack@undercloud redhat-director-scripts]$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1

$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
DockerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13

$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1

$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
ContainerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2

RHOSP13: /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp13/environments/trilio_env.yaml
RHOSP16.1: /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/environments/trilio_env.yaml
RHOSP16.2: /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.2/environments/trilio_env.yaml

openstack overcloud deploy --templates \
-e /home/stack/templates/node-info.yaml \
-e /home/stack/templates/overcloud_images.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/environments/trilio_env.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/enable-tls.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/inject-trust-anchor.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/environments/trilio_env_tls_endpoints_public_dns.yaml \
--ntp-server 192.168.1.34 \
--libvirt-type qemu \
--log-file overcloud_deploy.log \
-r /usr/share/openstack-tripleo-heat-templates/roles_data.yaml

[root@overcloud-controller-0 heat-admin]# podman ps | grep trilio
26fcb9194566 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-REL-VERSION>-rhosp16.1 kolla_start 5 days ago Up 5 days ago trilio_dmapi
094971d0f5a9 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-REL-VERSION>-rhosp16.1 kolla_start 5 days ago Up 5 days ago horizon

[root@overcloud-novacompute-0 heat-admin]# podman ps | grep trilio
b1840444cc59 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-REL-VERSION>-rhosp16.1 kolla_start 5 days ago Up 5 days ago trilio_datamover

[root@overcloud-controller-0 heat-admin]# podman ps | grep horizon
094971d0f5a9 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-REL-VERSION>-rhosp16.1 kolla_start 5 days ago Up 5 days ago horizon

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 12:06:01 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-4eb1863e-3afa-4a2c-b8e6-91a41fe37f78

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 12:31:49 GMT
Content-Type: application/json
Content-Length: 1223
Connection: keep-alive
X-Compute-Request-Id: req-c6f826a9-fff7-442b-8886-0770bb97c491
{
"scheduler_enabled":true,
"trust":{
"created_at":"2020-10-23T14:35:11.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"trust-002bcbaf-c16b-44e6-a9ef-9c1efbfa2e2c",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"value":"871ca24f38454b14b867338cb0e9b46c",
"description":"token id for user ccddc7e7a015487fa02920f4d4979779 project c76b3355a164498aa95ddbc960adc238",
"category":"identity",
"type":"trust_id",
"public":false,
"hidden":true,
"status":"available",
"metadata":[
{
"created_at":"2020-10-23T14:35:11.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"a3cc9a01-3d49-4ff8-ad8e-b12a7b3c68b0",
"settings_name":"trust-002bcbaf-c16b-44e6-a9ef-9c1efbfa2e2c",
"settings_project_id":"c76b3355a164498aa95ddbc960adc238",
"key":"role_name",
"value":"member"
}
]
},
"is_valid":true,
"scheduler_obj":{
"workload_id":"4bafaa03-f69a-45d5-a6fc-ae0119c77974",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"user_domain_id":"default",
"user":"ccddc7e7a015487fa02920f4d4979779",
"tenant":"c76b3355a164498aa95ddbc960adc238"
}
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 12:45:27 GMT
Content-Type: application/json
Content-Length: 30
Connection: keep-alive
X-Compute-Request-Id: req-cd447ce0-7bd3-4a60-aa92-35fc43b4729b
{"global_job_scheduler": true}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 12:49:29 GMT
Content-Type: application/json
Content-Length: 31
Connection: keep-alive
X-Compute-Request-Id: req-6f49179a-737a-48ab-91b7-7e7c460f5af0
{"global_job_scheduler": false}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 12:50:11 GMT
Content-Type: application/json
Content-Length: 30
Connection: keep-alive
X-Compute-Request-Id: req-ed279acc-9805-4443-af91-44a4420559bc
{"global_job_scheduler": true}git clone -b hotfix-13-TVO/4.1 https://github.com/trilioData/triliovault-cfg-scripts.git
cd triliovault-cfg-scripts/kolla-ansible/
# For Centos and Ubuntu
cp -R ansible/roles/triliovault /usr/local/share/kolla-ansible/ansible/roles/

## For Centos and Ubuntu
## Take backup of globals.yml
cp /etc/kolla/globals.yml /opt/
## Append Trilio global variables to globals.yml
cat ansible/triliovault_globals.yml >> /etc/kolla/globals.yml

## For Centos and Ubuntu
## Take backup of passwords.yml
cp /etc/kolla/passwords.yml /opt/
## Append Trilio global variables to passwords.yml
cat ansible/triliovault_passwords.yml >> /etc/kolla/passwords.yml
## For Centos and Ubuntu
## Take backup of site.yml
cp /usr/local/share/kolla-ansible/ansible/site.yml /opt/
## Append Trilio code to site.yml
cat ansible/triliovault_site.yml >> /usr/local/share/kolla-ansible/ansible/site.yml

For example:
If your inventory file path is '/root/multinode', then use the following command.
cat ansible/triliovault_inventory.txt >> /root/multinode

docker.io/trilio/<kolla_base_distro>-binary-trilio-datamover-api:<triliovault_tag>
docker.io/trilio/<kolla_base_distro>-binary-trilio-datamover:<triliovault_tag>
docker.io/trilio/<kolla_base_distro>-binary-trilio-horizon-plugin:<triliovault_tag>
nova_libvirt_default_volumes:
- "{{ node_config_directory }}/nova-libvirt/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "/run/:/run/:shared"
- "/dev:/dev"
- "/sys/fs/cgroup:/sys/fs/cgroup"
- "kolla_logs:/var/log/kolla/"
- "libvirtd:/var/lib/libvirt"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "
{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}
"
- "nova_libvirt_qemu:/etc/libvirt/qemu"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }
- "/var/trilio:/var/trilio:shared"nova_compute_default_volumes:
- "{{ node_config_directory }}/nova-compute/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "/run:/run:shared"
- "/dev:/dev"
- "kolla_logs:/var/log/kolla/"
- "
{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
- "libvirtd:/var/lib/libvirt"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
- "/var/trilio:/var/trilio:shared"nova_compute_ironic_default_volumes:
- "{{ node_config_directory }}/nova-compute-ironic/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
- "/var/trilio:/var/trilio:shared"kolla-ansible -i multinode pull --tags triliovaultkolla-ansible -i multinode deploy[root@controller ~]# docker ps | grep triliovault_datamover_api
f00781997bc3 trilio/centos-binary-trilio-datamover-api:<triliovault_tag> "dumb-init --single-…" 2 minutes ago Up 2 minutes triliovault_datamover_api
[root@compute ~]# docker ps | grep triliovault_datamover
84831db5d215 trilio/centos-binary-trilio-datamover:<triliovault_tag> "dumb-init --single-…" 5 minutes ago Up 4 minutes triliovault_datamover
[root@controller ~]# docker ps | grep horizon
f3647e0fff27 trilio/centos-binary-trilio-horizon-plugin:<triliovault_tag> "dumb-init --single-…" 8 minutes ago Up 8 minutes horizon

docker ps -a | grep trilio

docker logs trilio_datamover_api
docker logs trilio_datamover

docker ps | grep horizon

/var/log/kolla/triliovault-datamover-api/dmapi.log

/var/log/kolla/triliovault-datamover/tvault-contego.log

## Download the shell script
$ curl -O https://raw.githubusercontent.com/trilioData/triliovault-cfg-scripts/master/common/nova_userid.sh
## Assign executable permissions
$ chmod +x nova_userid.sh
## Execute the shell script to change 'nova' user and group id to '42436'
$ ./nova_userid.sh
## Ignore any errors and verify that the 'nova' user and group ids have changed to '42436'
$ id nova
uid=42436(nova) gid=42436(nova) groups=42436(nova),990(libvirt),36(kvm)

workloadmgr setting-create [--description <description>]
[--category <category>]
[--type <type>]
[--is-public {True,False}]
[--is-hidden {True,False}]
[--metadata <key=value>]
<name> <value>

workloadmgr setting-update [--description <description>]
[--category <category>]
[--type <type>]
[--is-public {True,False}]
[--is-hidden {True,False}]
[--metadata <key=value>]
<name> <value>

workloadmgr setting-show [--get_hidden {True,False}] <setting_name>

workloadmgr setting-delete <setting_name>

workloadmgr get-global-job-scheduler

workloadmgr disable-global-job-scheduler

workloadmgr enable-global-job-scheduler
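As a hypothetical example (the name, value, and type mirror the smtp_port setting shown in the API responses below; adjust to your own setting):

workloadmgr setting-create --type email_settings --is-public False --is-hidden False smtp_port 8080
workloadmgr setting-show smtp_port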
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 04 Feb 2021 11:55:43 GMT
Content-Type: application/json
Content-Length: 403
Connection: keep-alive
X-Compute-Request-Id: req-ac16c258-7890-4ae7-b7f4-015b5aa4eb99
{
"settings":[
{
"created_at":"2021-02-04T11:55:43.890855",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"smtp_port",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":null,
"value":"8080",
"description":null,
"category":null,
"type":"email_settings",
"public":false,
"hidden":0,
"status":"available",
"is_public":false,
"is_hidden":false
}
]
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 04 Feb 2021 12:01:27 GMT
Content-Type: application/json
Content-Length: 380
Connection: keep-alive
X-Compute-Request-Id: req-404f2808-7276-4c2b-8870-8368a048c28c
{
"setting":{
"created_at":"2021-02-04T11:55:43.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"smtp_port",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":null,
"value":"8080",
"description":null,
"category":null,
"type":"email_settings",
"public":false,
"hidden":false,
"status":"available",
"metadata":[
]
}
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 04 Feb 2021 12:05:59 GMT
Content-Type: application/json
Content-Length: 403
Connection: keep-alive
X-Compute-Request-Id: req-e92e2c38-b43a-4046-984e-64cea3a0281f
{
"settings":[
{
"created_at":"2021-02-04T11:55:43.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"smtp_port",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":null,
"value":"8080",
"description":null,
"category":null,
"type":"email_settings",
"public":false,
"hidden":0,
"status":"available",
"is_public":false,
"is_hidden":false
}
]
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 04 Feb 2021 11:49:17 GMT
Content-Type: application/json
Content-Length: 1223
Connection: keep-alive
X-Compute-Request-Id: req-5a8303aa-6c90-4cd9-9b6a-8c200f9c2473

{
"settings":[
{
"category":null,
"name":<String Setting_name>,
"is_public":false,
"is_hidden":false,
"metadata":{
},
"type":<String Setting type>,
"value":<String Setting Value>,
"description":null
}
]
}

{
"settings":[
{
"category":null,
"name":<String Setting_name>,
"is_public":false,
"is_hidden":false,
"metadata":{
},
"type":<String Setting type>,
"value":<String Setting Value>,
"description":null
}
]
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 21 Jan 2021 11:21:57 GMT
Content-Type: application/json
Content-Length: 868
Connection: keep-alive
X-Compute-Request-Id: req-fa48f0ad-aa76-42fa-85ea-1e5461889fb3
{
"trust":[
{
"created_at":"2020-11-26T13:10:53.000000",
"updated_at":null,
"deleted_at":null,
[root@TVM2 ~]# pcs resource disable wlm-cron
[root@TVM2 ~]# systemctl status wlm-cron
● wlm-cron.service - workload's scheduler cron service
Loaded: loaded (/etc/systemd/system/wlm-cron.service; disabled; vendor preset : disabled)
Active: inactive (dead)
Jun 11 08:27:06 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:06 - INFO - 1...t
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 140686268624368 Child 11389 ki...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - 1...5
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Shutting down thread pool
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...l
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: Stopping the threads
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - S...s
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: All threads are stopped succes...y
Jun 11 08:27:07 TVM2 workloadmgr-cron[11115]: 11-06-2021 08:27:07 - INFO - A...y
Jun 11 08:27:09 TVM2 systemd[1]: Stopped workload's scheduler cron service.
Hint: Some lines were ellipsized, use -l to show in full.
[root@TVM2 ~]# pcs resource show wlm-cron
Resource: wlm-cron (class=systemd type=wlm-cron)
Meta Attrs: target-role=Stopped
Operations: monitor interval=30s on-fail=restart timeout=300s (wlm-cron-monito r-interval-30s)
start interval=0s on-fail=restart timeout=300s (wlm-cron-start-int erval-0s)
stop interval=0s timeout=300s (wlm-cron-stop-interval-0s)
[root@TVM2 ~]# ps -ef | grep -i workloadmgr-cron
root     15379 14383  0 08:27 pts/0    00:00:00 grep --color=auto -i workloadmgr-cron
mv triliovault-cfg-scripts triliovault-cfg-scripts_4.1old
mv /usr/local/share/kolla-ansible/ansible/roles/triliovault /opt/triliovault_4.1old

git clone -b hotfix-13-TVO/4.1 https://github.com/trilioData/triliovault-cfg-scripts.git
cd triliovault-cfg-scripts/kolla-ansible/

cp -R ansible/roles/triliovault /usr/local/share/kolla-ansible/ansible/roles/

#Copy the backed-up original globals.yml (the one without triliovault variables) over the current globals.yml
cp /opt/globals.yml /etc/kolla/globals.yml
#Append Trilio global variables to globals.yml
cat ansible/triliovault_globals.yml >> /etc/kolla/globals.yml

#Take backup of current password file
cp /etc/kolla/passwords.yml /opt/password-<CURRENT-RELEASE>.yml
#Reset the passwords file to default one by reverting the backed-up original password.yml. This backup would have been taken during previous install/upgrade.
cp /opt/passwords.yml /etc/kolla/passwords.yml
#Append Trilio password variables to passwords.yml
cat ansible/triliovault_passwords.yml >> /etc/kolla/passwords.yml
#File /etc/kolla/passwords.yml to be edited to set passwords.
#To set the passwords, it is recommended to reuse the same passwords as the previous T4O deployment, as present in the password file backup (/opt/password-<CURRENT-RELEASE>.yml).
#Any additional passwords (in triliovault_passwords.yml) should be set by the user in /etc/kolla/passwords.yml.
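#To carry the previous passwords forward, a quick way is to compare the backed-up password file with the freshly reset one before editing. A minimal sketch, assuming the backup was created as shown above:
diff /opt/password-<CURRENT-RELEASE>.yml /etc/kolla/passwords.yml
vi /etc/kolla/passwords.yml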
#Take backup of current site.yml file
cp /usr/local/share/kolla-ansible/ansible/site.yml /opt/site-<CURRENT-RELEASE>.yml
#Reset the site.yml to default one by reverting the backed-up original site.yml inside current site.yml. This backup would have been taken during previous install/upgrade.
cp /opt/site.yml /usr/local/share/kolla-ansible/ansible/site.yml
#Append Trilio code to site.yml
cat ansible/triliovault_site.yml >> /usr/local/share/kolla-ansible/ansible/site.yml
For example:
If your inventory file path is '/root/multinode', then use the following:
#Clean up old T4O groups from /root/multinode and append the latest triliovault inventory file
cat ansible/triliovault_inventory.txt >> /root/multinode
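As a quick check that the Trilio groups were appended (and not duplicated from an earlier release), the inventory can be grepped afterwards; a small sketch, assuming the '/root/multinode' path used above:
grep -n 'triliovault' /root/multinode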
- "{{ node_config_directory }}/nova-libvirt/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "/run/:/run/:shared"
- "/dev:/dev"
- "/sys/fs/cgroup:/sys/fs/cgroup"
- "kolla_logs:/var/log/kolla/"
- "libvirtd:/var/lib/libvirt"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "
{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}
"
- "nova_libvirt_qemu:/etc/libvirt/qemu"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }
- "/var/trilio:/var/trilio:shared"nova_compute_default_volumes:
- "{{ node_config_directory }}/nova-compute/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_os_family == 'Debian' else '' }}"
- "/lib/modules:/lib/modules:ro"
- "/run:/run:shared"
- "/dev:/dev"
- "kolla_logs:/var/log/kolla/"
- "
{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
- "libvirtd:/var/lib/libvirt"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
- "/var/trilio:/var/trilio:shared"nova_compute_ironic_default_volumes:
- "{{ node_config_directory }}/nova-compute-ironic/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "{{ '/etc/timezone:/etc/timezone:ro' if ansible_os_family == 'Debian' else '' }}"
- "kolla_logs:/var/log/kolla/"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python' ~ distro_python_version ~ '/site-packages/nova' if nova_dev_mode | bool else '' }}"
- "/var/trilio:/var/trilio:shared"docker.io/trilio/<kolla_base_distro>-binary-trilio-datamover-api:<triliovault_tag>
docker.io/trilio/<kolla_base_distro>-binary-trilio-datamover:<triliovault_tag>
docker.io/trilio/<kolla_base_distro>-binary-trilio-horizon-plugin:<triliovault_tag>
kolla-ansible -i multinode pull --tags triliovault
kolla-ansible -i multinode upgrade
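To confirm the Trilio images were pulled with the expected tag, the local image list can be checked on a node; a minimal sketch, assuming Docker is the container engine in use:
docker images | grep trilio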
[root@controller ~]# docker ps | grep triliovault_datamover_api
f00781997bc3 trilio/centos-binary-trilio-datamover-api:<triliovault_tag> "dumb-init --single-…" 2 minutes ago Up 2 minutes triliovault_datamover_api
[root@compute ~]# docker ps | grep triliovault_datamover
4831db5d215 trilio/centos-binary-trilio-datamover:<triliovault_tag> "dumb-init --single-…" 5 minutes ago Up 4 minutes triliovault_datamover
[root@controller ~]# docker ps | grep horizon
f3647e0fff27 trilio/centos-binary-trilio-horizon-plugin:<triliovault_tag> "dumb-init --single-…" 8 minutes ago Up 8 minutes horizon
retries 5
timeout http-request 10m
timeout queue 10m
timeout connect 10m
timeout client 10m
timeout server 10m
timeout check 10m
balance roundrobin
maxconn 50000
/usr/local/share/kolla-ansible/ansible/roles/triliovault/defaults/main.yml
/etc/kolla/haproxy/services.d/triliovault-datamover-api.cfg
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 21 Jan 2021 11:43:36 GMT
Content-Type: application/json
Content-Length: 868
Connection: keep-alive
X-Compute-Request-Id: req-2151b327-ea74-4eec-b606-f0df358bc2a0
{
"trust":[
{
"created_at":"2021-01-21T11:43:36.140407",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"trust-b03daf38-1615-48d6-88f9-a807c728e786",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"value":"1c981a15e7a54242ae54eee6f8d32e6a",
"description":"token id for user adfa32d7746a4341b27377d6f7c61adb project 4dfe98a43bfa404785a812020066b4d6",
"category":"identity",
"type":"trust_id",
"public":false,
"hidden":1,
"status":"available",
"is_public":false,
"is_hidden":true,
"metadata":[
]
}
]
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 21 Jan 2021 11:39:12 GMT
Content-Type: application/json
Content-Length: 888
Connection: keep-alive
X-Compute-Request-Id: req-3c2f6acb-9973-4805-bae3-cd8dbcdc2cb4
{
"trust":{
"created_at":"2020-11-26T13:15:29.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"trust-54e24d8d-6bcf-449e-8021-708b4ebc65e1",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"value":"703dfabb4c5942f7a1960736dd84f4d4",
"description":"token id for user adfa32d7746a4341b27377d6f7c61adb project 4dfe98a43bfa404785a812020066b4d6",
"category":"identity",
"type":"trust_id",
"public":false,
"hidden":true,
"status":"available",
"metadata":[
{
"created_at":"2020-11-26T13:15:29.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"86aceea1-9121-43f9-b55c-f862052374ab",
"settings_name":"trust-54e24d8d-6bcf-449e-8021-708b4ebc65e1",
"settings_project_id":"4dfe98a43bfa404785a812020066b4d6",
"key":"role_name",
"value":"member"
}
]
}
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 21 Jan 2021 11:41:51 GMT
Content-Type: application/json
Content-Length: 888
Connection: keep-alive
X-Compute-Request-Id: req-d838a475-f4d3-44e9-8807-81a9c32ea2a8{
"scheduler_enabled":true,
"trust":{
"created_at":"2021-01-21T11:43:36.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"name":"trust-b03daf38-1615-48d6-88f9-a807c728e786",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"value":"1c981a15e7a54242ae54eee6f8d32e6a",
"description":"token id for user adfa32d7746a4341b27377d6f7c61adb project 4dfe98a43bfa404785a812020066b4d6",
"category":"identity",
"type":"trust_id",
"public":false,
"hidden":true,
"status":"available",
"metadata":[
{
"created_at":"2021-01-21T11:43:36.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"d98d283a-b096-4a68-826a-36f99781787d",
"settings_name":"trust-b03daf38-1615-48d6-88f9-a807c728e786",
"settings_project_id":"4dfe98a43bfa404785a812020066b4d6",
"key":"role_name",
"value":"member"
}
]
},
"is_valid":true,
"scheduler_obj":{
"workload_id":"209c13fa-e743-4ccd-81f7-efdaff277a1f",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_domain_id":"default",
"user":"adfa32d7746a4341b27377d6f7c61adb",
"tenant":"4dfe98a43bfa404785a812020066b4d6"
}
}{
"trusts":{
"role_name":"member",
"is_cloud_trust":false
}
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 10:34:10 GMT
Content-Type: application/json
Content-Length: 7888
Connection: keep-alive
X-Compute-Request-Id: req-9d73e5e6-ca5a-4c07-bdf2-ec2e688fc339
{
"workloads":[
{
"created_at":"2020-11-02T13:40:06.000000",
"updated_at":"2020-11-09T09:53:30.000000",
"id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"availability_zone":"nova",
"workload_type_id":"f82ce76f-17fe-438b-aa37-7a023058e50d",
"name":"Workload_1",
"description":"no-description",
"interval":null,
"storage_usage":null,
"instances":null,
"metadata":[
{
"created_at":"2020-11-09T09:57:23.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"ee27bf14-e460-454b-abf5-c17e3d484ec2",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"63cd8d96-1c4a-4e61-b1e0-3ae6a17bf533",
"value":"c8468146-8117-48a4-bfd7-49381938f636"
},
{
"created_at":"2020-11-05T10:27:06.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"22d3e3d6-5a37-48e9-82a1-af2dda11f476",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"67d6a100-fee6-4aa5-83a1-66b070d2eabe",
"value":"1fb104bf-7e2b-4cb6-84f6-96aabc8f1dd2"
},
{
"created_at":"2020-11-09T09:37:20.000000",
"updated_at":"2020-11-09T09:57:23.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"61615532-6165-45a2-91e2-fbad9eb0b284",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"b083bb70-e384-4107-b951-8e9e7bbac380",
"value":"c8468146-8117-48a4-bfd7-49381938f636"
},
{
"created_at":"2020-11-02T13:40:24.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"5a53c8ee-4482-4d6a-86f2-654d2b06e28c",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"backup_media_target",
"value":"10.10.2.20:/upstream"
},
{
"created_at":"2020-11-05T10:27:14.000000",
"updated_at":"2020-11-09T09:57:23.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"5cb4dc86-a232-4916-86bf-42a0d17f1439",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"e33c1eea-c533-4945-864d-0da1fc002070",
"value":"c8468146-8117-48a4-bfd7-49381938f636"
},
{
"created_at":"2020-11-02T13:40:06.000000",
"updated_at":"2020-11-02T14:10:30.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"506cd466-1e15-416f-9f8e-b9bdb942f3e1",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"hostnames",
"value":"[\"cirros-1\", \"cirros-2\"]"
},
{
"created_at":"2020-11-02T13:40:06.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"093a1221-edb6-4957-8923-cf271f7e43ce",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"pause_at_snapshot",
"value":"0"
},
{
"created_at":"2020-11-02T13:40:06.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"79baaba8-857e-410f-9d2a-8b14670c4722",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"policy_id",
"value":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd"
},
{
"created_at":"2020-11-02T13:40:06.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"4e23fa3d-1a79-4dc8-86cb-dc1ecbd7008e",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"preferredgroup",
"value":"[]"
},
{
"created_at":"2020-11-02T14:10:30.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"ed06cca6-83d8-4d4c-913b-30c8b8418b80",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"topology",
"value":"\"\\\"\\\"\""
},
{
"created_at":"2020-11-02T13:40:23.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"4b6a80f7-b011-48d4-b5fd-f705448de076",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"key":"workload_approx_backup_size",
"value":"6"
}
],
"jobschedule":"(dp0\nVfullbackup_interval\np1\nV-1\np2\nsVretention_policy_type\np3\nVNumber of Snapshots to Keep\np4\nsVend_date\np5\nVNo End\np6\nsVstart_time\np7\nV01:45 PM\np8\nsVinterval\np9\nV5\np10\nsVenabled\np11\nI00\nsVretention_policy_value\np12\nV10\np13\nsVtimezone\np14\nVUTC\np15\nsVstart_date\np16\nV11/02/2020\np17\nsVappliance_timezone\np18\nVUTC\np19\ns.",
"status":"locked",
"error_msg":null,
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/4dfe98a43bfa404785a812020066b4d6/workloads/18b809de-d7c8-41e2-867d-4a306407fb11"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/4dfe98a43bfa404785a812020066b4d6/workloads/18b809de-d7c8-41e2-867d-4a306407fb11"
}
],
"scheduler_trust":null
}
]
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 10:42:01 GMT
Content-Type: application/json
Content-Length: 120143
Connection: keep-alive
X-Compute-Request-Id: req-b443f6e7-8d8e-413f-8d91-7c30ba166e8c
{
"workloads":[
{
"created_at":"2019-04-24T14:09:20.000000",
"updated_at":"2019-05-16T09:10:17.000000",
"id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"user_id":"6ef8135faedc4259baac5871e09f0044",
"project_id":"863b6e2a8e4747f8ba80fdce1ccf332e",
"availability_zone":"nova",
"workload_type_id":"f82ce76f-17fe-438b-aa37-7a023058e50d",
"name":"comdirect_test",
"description":"Daily UNIX Backup 03:15 PM Full 7D Keep 8",
"interval":null,
"storage_usage":null,
"instances":null,
"metadata":[
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":false,
"created_at":"2019-05-16T09:13:54.000000",
"updated_at":null,
"value":"ca544215-1182-4a8f-bf81-910f5470887a",
"version":"3.2.46",
"key":"40965cbb-d352-4618-b8b0-ea064b4819bb",
"deleted_at":null,
"id":"5184260e-8bb3-4c52-abfa-1adc05fe6997"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":true,
"created_at":"2019-04-24T14:09:30.000000",
"updated_at":"2019-05-16T09:01:23.000000",
"value":"10.10.2.20:/upstream",
"version":"3.2.46",
"key":"backup_media_target",
"deleted_at":"2019-05-16T09:01:23.000000",
"id":"02dd0630-7118-485c-9e42-b01d23aa882c"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":false,
"created_at":"2019-05-16T09:13:51.000000",
"updated_at":null,
"value":"51693eca-8714-49be-b409-f1f1709db595",
"version":"3.2.46",
"key":"eb7d6b13-21e4-45d1-b888-d3978ab37216",
"deleted_at":null,
"id":"4b79a4ef-83d6-4e5a-afb3-f4e160c5f257"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":true,
"created_at":"2019-04-24T14:09:20.000000",
"updated_at":"2019-05-16T09:01:23.000000",
"value":"[\"Comdirect_test-2\", \"Comdirect_test-1\"]",
"version":"3.2.46",
"key":"hostnames",
"deleted_at":"2019-05-16T09:01:23.000000",
"id":"0cb6a870-8f30-4325-a4ce-e9604370198e"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":false,
"created_at":"2019-04-24T14:09:20.000000",
"updated_at":"2019-05-16T09:01:23.000000",
"value":"0",
"version":"3.2.46",
"key":"pause_at_snapshot",
"deleted_at":null,
"id":"5d4f109c-9dc2-48f3-a12a-e8b8fa4f5be9"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":true,
"created_at":"2019-04-24T14:09:20.000000",
"updated_at":"2019-05-16T09:01:23.000000",
"value":"[]",
"version":"3.2.46",
"key":"preferredgroup",
"deleted_at":"2019-05-16T09:01:23.000000",
"id":"9a223fbc-7cad-4c2c-ae8a-75e6ee8a6efc"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":true,
"created_at":"2019-04-24T14:11:49.000000",
"updated_at":"2019-05-16T09:01:23.000000",
"value":"\"\\\"\\\"\"",
"version":"3.2.46",
"key":"topology",
"deleted_at":"2019-05-16T09:01:23.000000",
"id":"77e436c0-0921-4919-97f4-feb58fb19e06"
},
{
"workload_id":"0ed39f25-5df2-4cc5-820f-2af2cde6aa67",
"deleted":true,
"created_at":"2019-04-24T14:09:30.000000",
"updated_at":"2019-05-16T09:01:23.000000",
"value":"121",
"version":"3.2.46",
"key":"workload_approx_backup_size",
"deleted_at":"2019-05-16T09:01:23.000000",
"id":"79aa04dd-a102-4bd8-b672-5b7a6ce9e125"
}
],
"jobschedule":"(dp1\nVfullbackup_interval\np2\nV7\nsVretention_policy_type\np3\nVNumber of days to retain Snapshots\np4\nsVend_date\np5\nV05/31/2019\np6\nsVstart_time\np7\nS'02:15 PM'\np8\nsVinterval\np9\nV24 hrs\np10\nsVenabled\np11\nI01\nsVretention_policy_value\np12\nI8\nsS'appliance_timezone'\np13\nS'UTC'\np14\nsVtimezone\np15\nVAfrica/Porto-Novo\np16\nsVstart_date\np17\nS'04/24/2019'\np18\ns.",
"status":"locked",
"error_msg":null,
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/4dfe98a43bfa404785a812020066b4d6/workloads/orphan_workloads/4dfe98a43bfa404785a812020066b4d6/workloads/0ed39f25-5df2-4cc5-820f-2af2cde6aa67"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/4dfe98a43bfa404785a812020066b4d6/workloads/orphan_workloads/4dfe98a43bfa404785a812020066b4d6/workloads/0ed39f25-5df2-4cc5-820f-2af2cde6aa67"
}
],
"scheduler_trust":null
}
]
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 11:03:55 GMT
Content-Type: application/json
Content-Length: 100
Connection: keep-alive
X-Compute-Request-Id: req-0e58b419-f64c-47e1-adb9-21ea2a255839
{
"workloads":{
"imported_workloads":[
"faa03-f69a-45d5-a6fc-ae0119c77974"
],
"failed_workloads":[
]
}
}{
"workload_ids":[
"<workload_id>"
],
"upgrade":true
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 15:40:56 GMT
Content-Type: application/json
Content-Length: 1625
Connection: keep-alive
X-Compute-Request-Id: req-2ad95c02-54c6-4908-887b-c16c5e2f20fe
{
"quota_types":[
{
"created_at":"2020-10-19T10:05:52.000000",
"updated_at":"2020-10-19T10:07:32.000000",
"deleted_at":null,
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 15:44:43 GMT
Content-Type: application/json
Content-Length: 342
Connection: keep-alive
X-Compute-Request-Id: req-5bf629fe-ffa2-4c90-b704-5178ba2ab09b
{
"quota_type":{
"created_at":"2020-10-19T10:05:52.000000",
"updated_at":"2020-10-19T10:07:32.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"1c5d4290-2e08-11ea-889c-7440bb00b67d",
"display_name":"Workloads",
"display_description":"Total number of workload creation allowed per project",
"status":"available"
}
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 15:51:51 GMT
Content-Type: application/json
Content-Length: 24
Connection: keep-alive
X-Compute-Request-Id: req-08c8cdb6-b249-4650-91fb-79a6f7497927
{
"allowed_quotas":[
{
}
]
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 16:01:39 GMT
Content-Type: application/json
Content-Length: 766
Connection: keep-alive
X-Compute-Request-Id: req-e570ce15-de0d-48ac-a9e8-60af429aebc0
{
"allowed_quotas":[
{
"id":"262b117d-e406-4209-8964-004b19a8d422",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"quota_type_id":"1c5d4290-2e08-11ea-889c-7440bb00b67d",
"allowed_value":5,
"high_watermark":4,
"version":"4.0.115",
"quota_type_name":"Workloads"
},
{
"id":"68e7203d-8a38-4776-ba58-051e6d289ee0",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"quota_type_id":"f02dd7a6-2e08-11ea-889c-7440bb00b67d",
"allowed_value":-1,
"high_watermark":-1,
"version":"4.0.115",
"quota_type_name":"Storage"
},
{
"id":"ed67765b-aea8-4898-bb1c-7c01ecb897d2",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"quota_type_id":"be323f58-2e08-11ea-889c-7440bb00b67d",
"allowed_value":50,
"high_watermark":25,
"version":"4.0.115",
"quota_type_name":"VMs"
}
]
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 16:15:07 GMT
Content-Type: application/json
Content-Length: 268
Connection: keep-alive
X-Compute-Request-Id: req-d87a57cd-c14c-44dd-931e-363158376cb7
{
"allowed_quotas":{
"id":"262b117d-e406-4209-8964-004b19a8d422",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"quota_type_id":"1c5d4290-2e08-11ea-889c-7440bb00b67d",
"allowed_value":5,
"high_watermark":4,
"version":"4.0.115",
"quota_type_name":"Workloads"
}
}HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 16:24:04 GMT
Content-Type: application/json
Content-Length: 24
Connection: keep-alive
X-Compute-Request-Id: req-a4c02ee5-b86e-4808-92ba-c363b287f1a2
{"allowed_quotas": [{}]}HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Wed, 18 Nov 2020 16:33:09 GMT
Content-Type: text/html; charset=UTF-8
Content-Length: 0
Connection: keep-alive{
"allowed_quotas":[
{
"project_id":"<project_id>",
"quota_type_id":"<quota_type_id>",
"allowed_value":"<integer>",
"high_watermark":"<Integer>"
}
]
}{
"allowed_quotas":{
"project_id":"c76b3355a164498aa95ddbc960adc238",
"allowed_value":"20000",
"high_watermark":"18000"
}
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 04 Nov 2020 12:58:38 GMT
Content-Type: application/json
Content-Length: 266
Connection: keep-alive
X-Compute-Request-Id: req-ed391cf9-aa56-4c53-8153-fd7fb238c4b9
{
"snapshots":[
{
"id":"1ff16412-a0cd-4e6a-9b4a-b5d4440fffc4",
"created_at":"2020-11-02T14:03:18.000000",
"status":"available",
"snapshot_type":"full",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"name":"snapshot",
"description":"-",
"host":"TVM1"
}
]
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 04 Nov 2020 13:58:38 GMT
Content-Type: application/json
Content-Length: 283
Connection: keep-alive
X-Compute-Request-Id: req-fb8dc382-e5de-4665-8d88-c75b2e473f5c
{
"snapshot":{
"id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"created_at":"2020-11-04T13:58:37.694637",
"status":"creating",
"snapshot_type":"full",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"name":"API taken 2",
"description":"API taken description 2",
"host":""
}
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 04 Nov 2020 14:07:18 GMT
Content-Type: application/json
Content-Length: 6609
Connection: keep-alive
X-Compute-Request-Id: req-f88fb28f-f4ce-4585-9c3c-ebe08a3f60cd
{
"snapshot":{
"id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"created_at":"2020-11-04T13:58:37.000000",
"updated_at":"2020-11-04T14:06:03.000000",
"finished_at":"2020-11-04T14:06:03.000000",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"status":"available",
"snapshot_type":"full",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"instances":[
{
"id":"67d6a100-fee6-4aa5-83a1-66b070d2eabe",
"name":"cirros-2",
"status":"available",
"metadata":{
"availability_zone":"nova",
"config_drive":"",
"data_transfer_time":"0",
"object_store_transfer_time":"0",
"root_partition_type":"Linux",
"trilio_ordered_interfaces":"192.168.100.80",
"vm_metadata":"{\"workload_name\": \"Workload_1\", \"workload_id\": \"18b809de-d7c8-41e2-867d-4a306407fb11\", \"trilio_ordered_interfaces\": \"192.168.100.80\", \"config_drive\": \"\"}",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"workload_name":"Workload_1"
},
"flavor":{
"vcpus":"1",
"ram":"512",
"disk":"1",
"ephemeral":"0"
},
"security_group":[
{
"name":"default",
"security_group_type":"neutron"
}
],
"nics":[
{
"mac_address":"fa:16:3e:cf:10:91",
"ip_address":"192.168.100.80",
"network":{
"id":"5fb7027d-a2ac-4a21-9ee1-438c281d2b26",
"name":"robert_internal",
"cidr":null,
"network_type":"neutron",
"subnet":{
"id":"b7b54304-aa82-4d50-91e6-66445ab56db4",
"name":"robert_internal",
"cidr":"192.168.100.0/24",
"ip_version":4,
"gateway_ip":"192.168.100.1"
}
}
}
],
"vdisks":[
{
"label":null,
"resource_id":"fa888089-5715-4228-9e5a-699f8f9d59ba",
"restore_size":1073741824,
"vm_id":"67d6a100-fee6-4aa5-83a1-66b070d2eabe",
"volume_id":"51491d30-9818-4332-b056-1f174e65d3e3",
"volume_name":"51491d30-9818-4332-b056-1f174e65d3e3",
"volume_size":"1",
"volume_type":"iscsi",
"volume_mountpoint":"/dev/vda",
"availability_zone":"nova",
"metadata":{
"readonly":"False",
"attached_mode":"rw"
}
}
]
},
{
"id":"e33c1eea-c533-4945-864d-0da1fc002070",
"name":"cirros-1",
"status":"available",
"metadata":{
"availability_zone":"nova",
"config_drive":"",
"data_transfer_time":"0",
"object_store_transfer_time":"0",
"root_partition_type":"Linux",
"trilio_ordered_interfaces":"192.168.100.176",
"vm_metadata":"{\"workload_name\": \"Workload_1\", \"workload_id\": \"18b809de-d7c8-41e2-867d-4a306407fb11\", \"trilio_ordered_interfaces\": \"192.168.100.176\", \"config_drive\": \"\"}",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"workload_name":"Workload_1"
},
"flavor":{
"vcpus":"1",
"ram":"512",
"disk":"1",
"ephemeral":"0"
},
"security_group":[
{
"name":"default",
"security_group_type":"neutron"
}
],
"nics":[
{
"mac_address":"fa:16:3e:cf:4d:27",
"ip_address":"192.168.100.176",
"network":{
"id":"5fb7027d-a2ac-4a21-9ee1-438c281d2b26",
"name":"robert_internal",
"cidr":null,
"network_type":"neutron",
"subnet":{
"id":"b7b54304-aa82-4d50-91e6-66445ab56db4",
"name":"robert_internal",
"cidr":"192.168.100.0/24",
"ip_version":4,
"gateway_ip":"192.168.100.1"
}
}
}
],
"vdisks":[
{
"label":null,
"resource_id":"c8293bb0-031a-4d33-92ee-188380211483",
"restore_size":1073741824,
"vm_id":"e33c1eea-c533-4945-864d-0da1fc002070",
"volume_id":"365ad75b-ca76-46cb-8eea-435535fd2e22",
"volume_name":"365ad75b-ca76-46cb-8eea-435535fd2e22",
"volume_size":"1",
"volume_type":"iscsi",
"volume_mountpoint":"/dev/vda",
"availability_zone":"nova",
"metadata":{
"readonly":"False",
"attached_mode":"rw"
}
}
]
}
],
"name":"API taken 2",
"description":"API taken description 2",
"host":"TVM1",
"size":44171264,
"restore_size":2147483648,
"uploaded_size":44171264,
"progress_percent":100,
"progress_msg":"Snapshot of workload is complete",
"warning_msg":null,
"error_msg":null,
"time_taken":428,
"pinned":false,
"metadata":[
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"16fc1ce5-81b2-4c07-ac63-6c9232e0418f",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"backup_media_target",
"value":"10.10.2.20:/upstream"
},
{
"created_at":"2020-11-04T13:58:37.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"5a56bbad-9957-4fb3-9bbc-469ec571b549",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"cancel_requested",
"value":"0"
},
{
"created_at":"2020-11-04T14:05:29.000000",
"updated_at":"2020-11-04T14:05:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"d36abef7-9663-4d88-8f2e-ef914f068fb4",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"data_transfer_time",
"value":"0"
},
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"c75f9151-ef87-4a74-acf1-42bd2588ee64",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"hostnames",
"value":"[\"cirros-1\", \"cirros-2\"]"
},
{
"created_at":"2020-11-04T14:05:29.000000",
"updated_at":"2020-11-04T14:05:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"02916cce-79a2-4ad9-a7f6-9d9f59aa8424",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"object_store_transfer_time",
"value":"0"
},
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"96efad2f-a24f-4cde-8e21-9cd78f78381b",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"pause_at_snapshot",
"value":"0"
},
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"572a0b21-a415-498f-b7fa-6144d850ef56",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"policy_id",
"value":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd"
},
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"dfd7314d-8443-4a95-8e2a-7aad35ef97ea",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"preferredgroup",
"value":"[]"
},
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"2e17e1e4-4bb1-48a9-8f11-c4cd2cfca2a9",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"topology",
"value":"\"\\\"\\\"\""
},
{
"created_at":"2020-11-04T14:05:57.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"33762790-8743-4e20-9f50-3505a00dbe76",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"key":"workload_approx_backup_size",
"value":"6"
}
],
"restores_info":""
}
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 04 Nov 2020 14:18:36 GMT
Content-Type: application/json
Content-Length: 56
Connection: keep-alive
X-Compute-Request-Id: req-82ffb2b6-b28e-4c73-89a4-310890960dbc
{"task": {"id": "a73de236-6379-424a-abc7-33d553e050b7"}}
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Wed, 04 Nov 2020 14:26:44 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-47a5a426-c241-429e-9d69-d40aed0dd68d{
"snapshot":{
"is_scheduled":<true/false>,
"name":"<name>",
"description":"<description>"
}
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 29 Oct 2020 14:55:40 GMT
Content-Type: application/json
Content-Length: 3480
Connection: keep-alive
X-Compute-Request-Id: req-a2e49b7e-ce0f-4dcb-9e61-c5a4756d9948
{
"workloads":[
{
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"id":"8ee7a61d-a051-44a7-b633-b495e6f8fc1d",
HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Thu, 29 Oct 2020 15:42:02 GMT
Content-Type: application/json
Content-Length: 703
Connection: keep-alive
X-Compute-Request-Id: req-443b9dea-36e6-4721-a11b-4dce3c651ede
{
"workload":{
"project_id":"c76b3355a164498aa95ddbc960adc238",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"id":"c4e3aeeb-7d87-4c49-99ed-677e51ba715e",
"name":"API created",
"snapshots_info":"",
"description":"API description",
"workload_type_id":"f82ce76f-17fe-438b-aa37-7a023058e50d",
"status":"creating",
"created_at":"2020-10-29T15:42:01.000000",
"updated_at":"2020-10-29T15:42:01.000000",
"scheduler_trust":null,
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/c76b3355a164498aa95ddbc960adc238/workloads/c4e3aeeb-7d87-4c49-99ed-677e51ba715e"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/c76b3355a164498aa95ddbc960adc238/workloads/c4e3aeeb-7d87-4c49-99ed-677e51ba715e"
}
]
}
}
retention_policy_type
retention_policy_value
interval
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Mon, 02 Nov 2020 12:08:42 GMT
Content-Type: application/json
Content-Length: 1536
Connection: keep-alive
X-Compute-Request-Id: req-afb76abb-aa33-427e-8219-04fc2b91bce0
{
"workload":{
"created_at":"2020-10-29T15:42:01.000000",
"updated_at":"2020-10-29T15:42:18.000000",
"id":"c4e3aeeb-7d87-4c49-99ed-677e51ba715e",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"availability_zone":"nova",
"workload_type_id":"f82ce76f-17fe-438b-aa37-7a023058e50d",
"name":"API created",
"description":"API description",
"interval":null,
"storage_usage":{
"usage":0,
"full":{
"snap_count":0,
"usage":0
},
"incremental":{
"snap_count":0,
"usage":0
}
},
"instances":[
{
"id":"08dab61c-6efd-44d3-a9ed-8e789d338c1b",
"name":"cirros-4",
"metadata":{
}
},
{
"id":"7c1bb5d2-aa5a-44f7-abcd-2d76b819b4c8",
"name":"cirros-3",
"metadata":{
}
}
],
"metadata":{
"hostnames":"[]",
"meta":"data",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"preferredgroup":"[]",
"workload_approx_backup_size":"6"
},
"jobschedule":{
"retention_policy_type":"Number of Snapshots to Keep",
"end_date":"15/27/2020",
"start_time":"3:00 PM",
"interval":"5",
"enabled":false,
"retention_policy_value":"10",
"timezone":"UTC+2",
"start_date":"10/27/2020",
"fullbackup_interval":"-1",
"appliance_timezone":"UTC",
"global_jobscheduler":true
},
"status":"available",
"error_msg":null,
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/c76b3355a164498aa95ddbc960adc238/workloads/c4e3aeeb-7d87-4c49-99ed-677e51ba715e"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/c76b3355a164498aa95ddbc960adc238/workloads/c4e3aeeb-7d87-4c49-99ed-677e51ba715e"
}
],
"scheduler_trust":null
}
}HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Mon, 02 Nov 2020 12:31:42 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-674a5d71-4aeb-4f99-90ce-7e8d3158d137
retention_policy_type
retention_policy_value
interval
HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Mon, 02 Nov 2020 13:31:00 GMT
Content-Type: text/html; charset=UTF-8
Content-Length: 0
Connection: keep-aliveHTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Mon, 02 Nov 2020 13:41:55 GMT
Content-Type: text/html; charset=UTF-8
Content-Length: 0
Connection: keep-aliveHTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Mon, 02 Nov 2020 13:52:30 GMT
Content-Type: text/html; charset=UTF-8
Content-Length: 0
Connection: keep-alive{
"workload":{
"name":"<name of the Workload>",
"description":"<description of workload>",
"workload_type_id":"<ID of the chosen Workload Type",
"source_platform":"openstack",
"instances":[
{
"instance-id":"<Instance ID>"
},
{
"instance-id":"<Instance ID>"
}
],
"jobschedule":{
"retention_policy_type":"<'Number of Snapshots to Keep'/'Number of days to retain Snapshots'>",
"retention_policy_value":"<Integer>"
"timezone":"<timezone>",
"start_date":"<Date format: MM/DD/YYYY>"
"end_date":"<Date format MM/DD/YYYY>",
"start_time":"<Time format: HH:MM AM/PM>",
"interval":"<Format: Integer hr",
"enabled":"<True/False>"
},
"metadata":{
<key>:<value>,
"policy_id":"<policy_id>"
}
}
}{
"workload":{
"name":"<name>",
"description":"<description>"
"instances":[
{
"instance-id":"<instance_id>"
},
{
"instance-id":"<instance_id>"
}
],
"jobschedule":{
"retention_policy_type":"<'Number of Snapshots to Keep'/'Number of days to retain Snapshots'>",
"retention_policy_value":"<Integer>",
"timezone":"<timezone>",
"start_time":"<HH:MM AM/PM>",
"end_date":"<MM/DD/YYYY>",
"interval":"<Integer hr>",
"enabled":"<True/False>"
},
"metadata":{
"meta":"data",
"policy_id":"<policy_id>"
}
}
}
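The request bodies above are templates only. As a hedged illustration of how such a body could be submitted, assuming a valid Keystone token in $OS_TOKEN and the workloads URL pattern shown in the response links (the endpoint variable and the body file name are placeholders, not confirmed values):
curl -X POST "<wlm_api_endpoint>/v1/<project_id>/workloads" \
     -H "X-Auth-Token: $OS_TOKEN" \
     -H "Content-Type: application/json" \
     -d @workload_body.json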
cd triliovault-cfg-scripts/redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>/
vi trilio_env.yaml
cd /home/stack
git clone -b hotfix-13-TVO/4.1 https://github.com/trilioData/triliovault-cfg-scripts.git
cd triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/scripts/
./upload_puppet_module.sh
./upload_puppet_module.sh
Creating tarball...
Tarball created.
Creating heat environment file: /home/stack/.tripleo/environments/puppet-modules-url.yaml
Uploading file to swift: /tmp/puppet-modules-8Qjya2X/puppet-modules.tar.gz
+-----------------------+---------------------+----------------------------------+
| object | container | etag |
+-----------------------+---------------------+----------------------------------+
| puppet-modules.tar.gz | overcloud-artifacts | 368951f6a4d39cfe53b5781797b133ad |
+-----------------------+---------------------+----------------------------------+
## Above command creates the following file.
ls -ll /home/stack/.tripleo/environments/puppet-modules-url.yaml
(undercloud) [stack@ucloud161 ~]$ cat /home/stack/.tripleo/environments/puppet-modules-url.yaml
# Heat environment to deploy artifacts via Swift Temp URL(s)
parameter_defaults:
DeployArtifactURLs:
- 'http://172.25.0.103:8080/v1/AUTH_46ba596219d143c8b076e9fcc4139fed/overcloud-artifacts/puppet-modules.tar.gz?temp_url_sig=c3972b7ce75226c278ab3fa8237d31cc1f2115bd&temp_url_expires=1646738377'
(undercloud) [stack@ucloud161 ~]$ cat /home/stack/.tripleo/environments/puppet-modules-url.yaml | grep http >> /home/stack/templates/user-artifacts.yaml
(undercloud) [stack@ucloud161 ~]$ cat /home/stack/templates/user-artifacts.yaml
# Heat environment to deploy artifacts via Swift Temp URL(s)
parameter_defaults:
DeployArtifactURLs:
- 'http://172.25.0.103:8080/v1/AUTH_57ba596219d143c8b076e9fcc4139f3g/overcloud-artifacts/some-artifact.tar.gz?temp_url_sig=dc972b7ce75226c278ab3fa8237d31cc1f2115sc&temp_url_expires=3446738365'
- 'http://172.25.0.103:8080/v1/AUTH_46ba596219d143c8b076e9fcc4139fed/overcloud-artifacts/puppet-modules.tar.gz?temp_url_sig=c3972b7ce75226c278ab3fa8237d31cc1f2115bd&temp_url_expires=1646738377'
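The artifacts file assembled above is consumed like any other Heat environment file; a hedged sketch of passing it to the deploy command (all other options are placeholders for whatever your existing overcloud deploy already uses):
openstack overcloud deploy --templates \
  <existing deploy options> \
  -e /home/stack/templates/user-artifacts.yaml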
'OS::TripleO::Services::TrilioDatamoverApi'
'OS::TripleO::Services::TrilioDatamover'
Trilio Datamover container:
registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
Trilio Datamover Api Container:
registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
Trilio horizon plugin:
registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13
Trilio Datamover container:
registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
Trilio Datamover Api Container:
registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
Trilio horizon plugin:
registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
Trilio Datamover container:
registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
Trilio Datamover Api Container:
registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
Trilio horizon plugin:
registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2
parameter_defaults:
ContainerImagePrepare:
- push_destination: false
set:
namespace: registry.redhat.io/...
...
...
ContainerImageRegistryCredentials:
registry.redhat.io:
myuser: 'p@55w0rd!'
registry.connect.redhat.com:
myuser: 'p@55w0rd!'
ContainerImageRegistryLogin: true
# For RHOSP13
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
DockerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13
# For RHOSP16.1
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
# For RHOSP16.2
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: registry.connect.redhat.com/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
DockerTrilioDmApiImage: registry.connect.redhat.com/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
ContainerHorizonImage: registry.connect.redhat.com/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp13/scripts/
./prepare_trilio_images.sh <undercloud_ip/hostname> <HOTFIX-TAG-VERSION>-rhosp13
$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: 172.25.2.2:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
DockerTrilioDmApiImage: 172.25.2.2:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
DockerHorizonImage: 172.25.2.2:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/scripts/
sudo ./prepare_trilio_images.sh <undercloud_ip/hostname> <HOTFIX-TAG-VERSION>-rhosp16.1
$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
cd /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.2/scripts/
sudo ./prepare_trilio_images.sh <undercloud_ip/hostname> <HOTFIX-TAG-VERSION>-rhosp16.2
$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
DockerTrilioDmApiImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
ContainerHorizonImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2
(undercloud) [stack@undercloud redhat-director-scripts/<RHOSP_RELEASE_DIRECTORY>]$ openstack tripleo container image list | grep trilio
| docker://undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1 | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1 | |
| docker://undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1 |
-----------------------------------------------------------------------------------------------------
(undercloud) [stack@undercloud redhat-director-scripts]$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: undercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
$ grep 'Image' ../environments/trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp13
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp13
DockerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp13
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
ContainerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
$ grep 'Image' trilio_env.yaml
DockerTrilioDatamoverImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.2
DockerTrilioDmApiImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.2
ContainerHorizonImage: <SATELLITE_REGISTRY_URL>/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.2
resource_registry:
OS::TripleO::Services::TrilioDatamover: ../services/trilio-datamover.yaml
OS::TripleO::Services::TrilioDatamoverApi: ../services/trilio-datamover-api.yaml
# NOTE: If there are addition customizations to the endpoint map (e.g. for
# other integration), this will need to be regenerated.
OS::TripleO::EndpointMap: endpoint_map.yaml
parameter_defaults:
## Enable Trilio's quota functionality on horizon
ExtraConfig:
horizon::customization_module: 'dashboards.overrides'
## Define network map for trilio datamover api service
ServiceNetMap:
TrilioDatamoverApiNetwork: internal_api
## Trilio Datamover Password for keystone and database
TrilioDatamoverPassword: "test1234"
## Trilio container pull urls
DockerTrilioDatamoverImage: devundercloud.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-TAG-VERSION>-rhosp16.1
DockerTrilioDmApiImage: devundercloud.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-TAG-VERSION>-rhosp16.1
## If you do not want Trilio's horizon plugin to replace your horizon container, just comment following line.
ContainerHorizonImage: devundercloud.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-TAG-VERSION>-rhosp16.1
## Backup target type nfs/s3, used to store snapshots taken by triliovault
BackupTargetType: 'nfs'
## For backup target 'nfs'
NfsShares: '192.168.122.101:/opt/tvault'
NfsOptions: 'nolock,soft,timeo=180,intr,lookupcache=none'
## For backup target 's3'
## S3 type: amazon_s3/ceph_s3
S3Type: 'amazon_s3'
## S3 access key
S3AccessKey: ''
## S3 secret key
S3SecretKey: ''
## S3 region, if your s3 does not have any region, just keep the parameter as it is
S3RegionName: ''
## S3 bucket name
S3Bucket: ''
## S3 endpoint url, not required for Amazon S3, keep it as it is
S3EndpointUrl: ''
## S3 signature version
S3SignatureVersion: 'default'
## S3 Auth version
S3AuthVersion: 'DEFAULT'
## If S3 backend is not Amazon S3 and SSL is enabled on S3 endpoint url then change it to 'True', otherwise keep it as 'False'
S3SslEnabled: False
## If S3 backend is not Amazon S3 and SSL is enabled on S3 endpoint URL and SSL certificates are self signed, then
## the user needs to set this parameter value to: '/etc/tvault-contego/s3-cert.pem', otherwise keep its value as an empty string.
S3SslCert: ''
## Don't edit following parameter
EnablePackageInstall: True
/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg
listen trilio_datamover_api
bind 172.25.0.107:13784 transparent ssl crt /etc/pki/tls/private/overcloud_endpoint.pem
bind 172.25.0.107:8784 transparent
balance roundrobin
http-request set-header X-Forwarded-Proto https if { ssl_fc }
http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
http-request set-header X-Forwarded-Port %[dst_port]
maxconn 50000
option httpchk
option httplog
retries 5
timeout check 10m
timeout client 10m
timeout connect 10m
timeout http-request 10m
timeout queue 10m
timeout server 10m
server overcloud-controller-0.internalapi.localdomain 172.25.0.106:8784 check fall 5 inter 2000 rise 2
retries 5
timeout http-request 10m
timeout queue 10m
timeout connect 10m
timeout client 10m
timeout server 10m
timeout check 10m
balance roundrobin
maxconn 50000
/home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp13/services/trilio-datamover-api.yaml
/home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16/services/trilio-datamover-api.yaml
/home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/services/trilio-datamover-api.yaml
/home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.2/services/trilio-datamover-api.yaml
tripleo::haproxy::trilio_datamover_api::options:
'retries': '5'
'maxconn': '50000'
'balance': 'roundrobin'
'timeout http-request': '10m'
'timeout queue': '10m'
'timeout connect': '10m'
'timeout client': '10m'
'timeout server': '10m'
'timeout check': '10m'
openstack overcloud deploy --templates \
-e /home/stack/templates/node-info.yaml \
-e /home/stack/templates/overcloud_images.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/environments/trilio_env.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/enable-tls.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ssl/inject-trust-anchor.yaml \
-e /home/stack/triliovault-cfg-scripts/redhat-director-scripts/rhosp16.1/environments/trilio_env_tls_endpoints_public_dns.yaml \
--ntp-server 192.168.1.34 \
--libvirt-type qemu \
--log-file overcloud_deploy.log \
-r /usr/share/openstack-tripleo-heat-templates/roles_data.yaml
[root@overcloud-controller-0 heat-admin]# podman ps | grep trilio
26fcb9194566 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-datamover-api:<HOTFIX-REL-VERSION>-rhosp16.1 kolla_start 5 days ago Up 5 days ago trilio_dmapi
094971d0f5a9 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-REL-VERSION>-rhosp16.1 kolla_start 5 days ago Up 5 days ago horizon
/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg
[root@overcloud-novacompute-0 heat-admin]# podman ps | grep trilio
b1840444cc59 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-datamover:<HOTFIX-REL-VERSION>-rhosp16.1 kolla_start 5 days ago Up 5 days ago trilio_datamover
[root@overcloud-controller-0 heat-admin]# podman ps | grep horizon
094971d0f5a9 rhosptrainqa.ctlplane.localdomain:8787/trilio/trilio-horizon-plugin:<HOTFIX-REL-VERSION>-rhosp16.1 kolla_start 5 days ago Up 5 days ago horizon
## Download the shell script
$ curl -O https://raw.githubusercontent.com/trilioData/triliovault-cfg-scripts/master/common/nova_userid.sh
## Assign executable permissions
$ chmod +x nova_userid.sh
## Execute the shell script to change 'nova' user and group id to '42436'
$ ./nova_userid.sh
## Ignore any errors and verify that 'nova' user and group id has changed to '42436'
$ id nova
uid=42436(nova) gid=42436(nova) groups=42436(nova),990(libvirt),36(kvm)
openstack stack failures list overcloud
heat stack-list --show-nested -f "status=FAILED"
heat resource-list --nested-depth 5 overcloud | grep FAILED
=> If the trilio datamover api container does not start or keeps restarting, use the following logs to debug.
docker logs trilio_dmapi
tailf /var/log/containers/trilio-datamover-api/dmapi.log
=> If the trilio datamover container does not start or keeps restarting, use the following logs to debug.
docker logs trilio_datamover
tailf /var/log/containers/trilio-datamover/tvault-contego.log
If using the tls-everywhere-endpoints-dns.yaml file, use environments/trilio_env_tls_everywhere_dns.yaml
--display-description <display-description> ➡️ Optional description for restore.
--display-description <display-description> ➡️ Optional description for restore.
--display-description <display-description> ➡️ Optional description for restore.
oneclickrestore <True/False> ➡️ Whether the restore is a one-click restore. Setting this to True overrides all other settings and starts a One Click Restore.
Nics ➡️ List of OpenStack Neutron ports that shall be attached to the instance. Each Neutron port consists of:
new_volume_type ➡️ The Volume Type to use for the restored Volume. Leave empty for Volume Type None
id ➡️ Original ID of the Volume
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 13:56:08 GMT
Content-Type: application/json
Content-Length: 1399
Connection: keep-alive
X-Compute-Request-Id: req-4618161e-64e4-489a-b8fc-f3cb21d94096
{
"policy_list":[
{
"id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":"2020-10-26T12:52:22.000000",
workloadmgr restore-list [--snapshot_id <snapshot_id>]
workloadmgr restore-show [--output <output>] <restore_id>
workloadmgr restore-delete <restore_id>
workloadmgr restore-cancel <restore_id>
workloadmgr snapshot-oneclick-restore [--display-name <display-name>]
[--display-description <display-description>]
<snapshot_id>
workloadmgr snapshot-selective-restore [--display-name <display-name>]
[--display-description <display-description>]
[--filename <filename>]
<snapshot_id>
workloadmgr snapshot-inplace-restore [--display-name <display-name>]
[--display-description <display-description>]
[--filename <filename>]
<snapshot_id>
{
oneclickrestore: False,
restore_type: selective,
type: openstack,
openstack:
{
instances:
[
{
include: True,
id: 890888bc-a001-4b62-a25b-484b34ac6e7e,
name: cdcentOS-1,
availability_zone:,
nics: [],
vdisks:
[
{
id: 4cc2b474-1f1b-4054-a922-497ef5564624,
new_volume_type:,
availability_zone: nova
}
],
flavor:
{
ram: 512,
ephemeral: 0,
vcpus: 1,
swap:,
disk: 1,
id: 1
}
}
],
restore_topology: True,
networks_mapping:
{
networks: []
}
}
}
'instances':[
{
'name':'cdcentOS-1-selective',
'availability_zone':'US-East',
'nics':[
{
'mac_address':'fa:16:3e:00:bd:60',
'ip_address':'192.168.0.100',
'id':'8b871820-f92e-41f6-80b4-00555a649b4c',
'network':{
'subnet':{
'id':'2b1506f4-2a7a-4602-a8b9-b7e8a49f95b8'
},
'id':'d5047e84-077e-4b38-bc43-e3360b0ad174'
}
}
],
'vdisks':[
{
'id':'4cc2b474-1f1b-4054-a922-497ef5564624',
'new_volume_type':'ceph',
'availability_zone':'nova'
}
],
'flavor':{
'ram':2048,
'ephemeral':0,
'vcpus':1,
'swap':'',
'disk':20,
'id':'2'
},
'include':True,
'id':'890888bc-a001-4b62-a25b-484b34ac6e7e'
}
]
restore_topology:True
restore_topology:False
{
'oneclickrestore':False,
'openstack':{
'instances':[
{
'name':'cdcentOS-1-selective',
'availability_zone':'US-East',
'nics':[
{
'mac_address':'fa:16:3e:00:bd:60',
'ip_address':'192.168.0.100',
'id':'8b871820-f92e-41f6-80b4-00555a649b4c',
'network':{
'subnet':{
'id':'2b1506f4-2a7a-4602-a8b9-b7e8a49f95b8'
},
'id':'d5047e84-077e-4b38-bc43-e3360b0ad174'
}
}
],
'vdisks':[
{
'id':'4cc2b474-1f1b-4054-a922-497ef5564624',
'new_volume_type':'ceph',
'availability_zone':'nova'
}
],
'flavor':{
'ram':2048,
'ephemeral':0,
'vcpus':1,
'swap':'',
'disk':20,
'id':'2'
},
'include':True,
'id':'890888bc-a001-4b62-a25b-484b34ac6e7e'
}
],
'restore_topology':False,
'networks_mapping':{
'networks':[
{
'snapshot_network':{
'subnet':{
'id':'8b609440-4abf-4acf-a36b-9a0fa70c383c'
},
'id':'8b871820-f92e-41f6-80b4-00555a649b4c'
},
'target_network':{
'subnet':{
'id':'2b1506f4-2a7a-4602-a8b9-b7e8a49f95b8'
},
'id':'d5047e84-077e-4b38-bc43-e3360b0ad174',
'name':'internal'
}
}
]
}
},
'restore_type':'selective',
'type':'openstack'
}
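A selective restore body like the one above is usually written to a file and handed to the CLI command shown earlier; a minimal sketch, with 'selective_restore.json' as a placeholder file name:
workloadmgr snapshot-selective-restore --filename selective_restore.json <snapshot_id>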
{
'oneclickrestore':False,
'restore_type':'inplace',
'type':'openstack',
'openstack':{
'instances':[
{
'restore_boot_disk':True,
'include':True,
'id':'ba8c27ab-06ed-4451-9922-d919171078de',
'vdisks':[
{
'restore_cinder_volume':True,
'id':'04d66b70-6d7c-4d1b-98e0-11059b89cba6',
}
]
}
]
}
}
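The in-place restore body can be passed the same way; a minimal sketch, with 'inplace_restore.json' as a placeholder file name:
workloadmgr snapshot-inplace-restore --filename inplace_restore.json <snapshot_id>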
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Fri, 13 Nov 2020 14:18:42 GMT
Content-Type: application/json
Content-Length: 2160
Connection: keep-alive
X-Compute-Request-Id: req-0583fc35-0f80-4746-b280-c17b32cc4b25
{
"policy":{
"id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":"2020-10-26T12:52:22.000000",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"status":"available",
"name":"Gold",
"description":"",
"field_values":[
{
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"0201f8b4-482d-4ec1-9b92-8cf3092abcc2",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"policy_field_name":"retention_policy_value",
"value":"10"
},
{
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"48cc7007-e221-44de-bd4e-6a66841bdee0",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"policy_field_name":"interval",
"value":"5"
},
{
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"79070c67-9021-4220-8a79-648ffeebc144",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"policy_field_name":"retention_policy_type",
"value":"Number of Snapshots to Keep"
},
{
"created_at":"2020-10-26T12:52:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"9fec205a-9528-45ea-a118-ffb64d8c7d9d",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"policy_field_name":"fullbackup_interval",
"value":"-1"
}
],
"metadata":[
],
"policy_assignments":[
{
"created_at":"2020-10-26T12:53:01.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"3e3f1b12-1b1f-452b-a9d2-b6e5fbf2ab18",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"policy_name":"Gold",
"project_name":"admin"
},
{
"created_at":"2020-10-29T15:39:13.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"8b4a6236-63f1-4e2d-b8d1-23b37f4b4346",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"policy_name":"Gold",
"project_name":"robert"
}
]
}
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 09:14:01 GMT
Content-Type: application/json
Content-Length: 338
Connection: keep-alive
X-Compute-Request-Id: req-57175488-d267-4dcb-90b5-f239d8b02fe2
{
"policies":[
{
"created_at":"2020-10-29T15:39:13.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"8b4a6236-63f1-4e2d-b8d1-23b37f4b4346",
"policy_id":"b79aa5f3-405b-4da4-96e2-893abf7cb5fd",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"policy_name":"Gold",
"project_name":"robert"
}
]
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 09:24:03 GMT
Content-Type: application/json
Content-Length: 1413
Connection: keep-alive
X-Compute-Request-Id: req-05e05333-b967-4d4e-9c9b-561f1a7add5a
{
"policy":{
"id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:24:01.000000",
"status":"available",
"name":"CLI created",
"description":"CLI created",
"metadata":[
],
"field_values":[
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"767ae42d-caf0-4d36-963c-9b0e50991711",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"interval",
"value":"4 hr"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"7e34ce5c-3de0-408e-8294-cc091bee281f",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"retention_policy_value",
"value":"10"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"95537f7c-e59a-4365-b1e9-7fa2ed49c677",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"retention_policy_type",
"value":"Number of Snapshots to Keep"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"f635bece-be61-4e72-bce4-bc72a6f549e3",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"fullbackup_interval",
"value":"-1"
}
]
}
}HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 09:32:13 GMT
Content-Type: application/json
Content-Length: 1515
Connection: keep-alive
X-Compute-Request-Id: req-9104cf1c-4025-48f5-be92-1a6b7117bf95
{
"policy":{
"id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:24:01.000000",
"status":"available",
"name":"API created",
"description":"API created",
"metadata":[
],
"field_values":[
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"767ae42d-caf0-4d36-963c-9b0e50991711",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"interval",
"value":"8 hr"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"7e34ce5c-3de0-408e-8294-cc091bee281f",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"retention_policy_value",
"value":"20"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"95537f7c-e59a-4365-b1e9-7fa2ed49c677",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"retention_policy_type",
"value":"Number of days to retain Snapshots"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"f635bece-be61-4e72-bce4-bc72a6f549e3",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"fullbackup_interval",
"value":"7"
}
]
}
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 09:46:23 GMT
Content-Type: application/json
Content-Length: 2318
Connection: keep-alive
X-Compute-Request-Id: req-169a53e4-b1c9-4bd1-bf68-3416d177d868
{
"policy":{
"id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:24:01.000000",
"user_id":"adfa32d7746a4341b27377d6f7c61adb",
"project_id":"4dfe98a43bfa404785a812020066b4d6",
"status":"available",
"name":"API created",
"description":"API created",
"field_values":[
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"767ae42d-caf0-4d36-963c-9b0e50991711",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"interval",
"value":"8 hr"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"7e34ce5c-3de0-408e-8294-cc091bee281f",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"retention_policy_value",
"value":"20"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"95537f7c-e59a-4365-b1e9-7fa2ed49c677",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"retention_policy_type",
"value":"Number of days to retain Snapshots"
},
{
"created_at":"2020-11-17T09:24:01.000000",
"updated_at":"2020-11-17T09:31:45.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"f635bece-be61-4e72-bce4-bc72a6f549e3",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"policy_field_name":"fullbackup_interval",
"value":"7"
}
],
"metadata":[
],
"policy_assignments":[
{
"created_at":"2020-11-17T09:46:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"4794ed95-d8d1-4572-93e8-cebd6d4df48f",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"project_id":"cbad43105e404c86a1cd07c48a737f9c",
"policy_name":"API created",
"project_name":"services"
},
{
"created_at":"2020-11-17T09:46:22.000000",
"updated_at":null,
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"68f187a6-3526-4a35-8b2d-cb0e9f497dd8",
"policy_id":"23176f20-9e9d-4fc3-9d3d-f10d2b184163",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"policy_name":"API created",
"project_name":"robert"
}
]
},
"failed_ids":[
]
}

HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Tue, 17 Nov 2020 09:56:03 GMT
Content-Type: text/html; charset=UTF-8
Content-Length: 0
Connection: keep-alive

{
"workload_policy":{
"field_values":{
"fullbackup_interval":"<-1 for never / 0 for always / Integer>",
"retention_policy_type":"<Number of Snapshots to Keep/Number of days to retain Snapshots>",
"interval":"<Integer hr>",
"retention_policy_value":"<Integer>"
},
"display_name":"<String>",
"display_description":"<String>",
"metadata":{
<key>:<value>
}
}
}

{
"policy":{
"field_values":{
"fullbackup_interval":"<-1 for never / 0 for always / Integer>",
"retention_policy_type":"<Number of Snapshots to Keep/Number of days to retain Snapshots>",
"interval":"<Integer hr>",
"retention_policy_value":"<Integer>"
},
"display_name":"String",
"display_description":"String",
"metadata":{
<key>:<value>
}
}
}

{
"policy":{
"remove_projects":[
"<project_id>"
],
"add_projects":[
"<project_id>",
]
}
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 05 Nov 2020 11:28:43 GMT
Content-Type: application/json
Content-Length: 4308
Connection: keep-alive
X-Compute-Request-Id: req-0bc531b6-be6e-43b4-90bd-39ef26ef1463
{
"restores":[
{
"id":"29fdc1f8-1d53-4a10-bb45-e539a64cdbfc",
"created_at":"2020-11-05T10:17:40.000000",
"updated_at":"2020-11-05T10:17:40.000000",
HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 05 Nov 2020 14:04:45 GMT
Content-Type: application/json
Content-Length: 2639
Connection: keep-alive
X-Compute-Request-Id: req-30640219-e94e-4651-9b9e-49f5574e2a7f
{
"restore":{
"id":"29fdc1f8-1d53-4a10-bb45-e539a64cdbfc",
"created_at":"2020-11-05T10:17:40.000000",
"updated_at":"2020-11-05T10:17:40.000000",
"finished_at":"2020-11-05T10:27:20.000000",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"status":"available",
"restore_type":"restore",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"snapshot_details":{
"created_at":"2020-11-04T13:58:37.000000",
"updated_at":"2020-11-05T10:27:22.000000",
"deleted_at":null,
"deleted":false,
"version":"4.0.115",
"id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"snapshot_type":"full",
"display_name":"API taken 2",
"display_description":"API taken description 2",
"size":44171264,
"restore_size":2147483648,
"uploaded_size":44171264,
"progress_percent":100,
"progress_msg":"Creating Instance: cirros-2",
"warning_msg":null,
"error_msg":null,
"host":"TVM1",
"finished_at":"2020-11-04T14:06:03.000000",
"data_deleted":false,
"pinned":false,
"time_taken":428,
"vault_storage_id":null,
"status":"available"
},
"workload_id":"18b809de-d7c8-41e2-867d-4a306407fb11",
"instances":[
{
"id":"1fb104bf-7e2b-4cb6-84f6-96aabc8f1dd2",
"name":"cirros-2",
"status":"available",
"metadata":{
"config_drive":"",
"instance_id":"67d6a100-fee6-4aa5-83a1-66b070d2eabe",
"production":"1"
}
},
{
"id":"b083bb70-e384-4107-b951-8e9e7bbac380",
"name":"cirros-1",
"status":"available",
"metadata":{
"config_drive":"",
"instance_id":"e33c1eea-c533-4945-864d-0da1fc002070",
"production":"1"
}
}
],
"networks":[
],
"subnets":[
],
"routers":[
],
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/c76b3355a164498aa95ddbc960adc238/restores/29fdc1f8-1d53-4a10-bb45-e539a64cdbfc"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/c76b3355a164498aa95ddbc960adc238/restores/29fdc1f8-1d53-4a10-bb45-e539a64cdbfc"
}
],
"name":"OneClick Restore",
"description":"-",
"host":"TVM2",
"size":2147483648,
"uploaded_size":2147483648,
"progress_percent":100,
"progress_msg":"Restore from snapshot is complete",
"warning_msg":null,
"error_msg":null,
"time_taken":580,
"restore_options":{
"name":"OneClick Restore",
"oneclickrestore":true,
"restore_type":"oneclick",
"openstack":{
"instances":[
{
"name":"cirros-2",
"id":"67d6a100-fee6-4aa5-83a1-66b070d2eabe",
"availability_zone":"nova"
},
{
"name":"cirros-1",
"id":"e33c1eea-c533-4945-864d-0da1fc002070",
"availability_zone":"nova"
}
]
},
"type":"openstack",
"description":"-"
},
"metadata":[
]
}
}

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 05 Nov 2020 14:21:07 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-0e155b21-8931-480a-a749-6d8764666e4d

HTTP/1.1 200 OK
Server: nginx/1.16.1
Date: Thu, 05 Nov 2020 15:13:30 GMT
Content-Type: application/json
Content-Length: 0
Connection: keep-alive
X-Compute-Request-Id: req-98d4853c-314c-4f27-bd3f-f81bda1a2840

HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Thu, 05 Nov 2020 14:30:56 GMT
Content-Type: application/json
Content-Length: 992
Connection: keep-alive
X-Compute-Request-Id: req-7e18c309-19e5-49cb-a07e-90dd368fddae
{
"restore":{
"id":"3df1d432-2f76-4ebd-8f89-1275428842ff",
"created_at":"2020-11-05T14:30:56.048656",
"updated_at":"2020-11-05T14:30:56.048656",
"finished_at":null,
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"status":"restoring",
"restore_type":"restore",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/c76b3355a164498aa95ddbc960adc238/restores/3df1d432-2f76-4ebd-8f89-1275428842ff"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/c76b3355a164498aa95ddbc960adc238/restores/3df1d432-2f76-4ebd-8f89-1275428842ff"
}
],
"name":"One Click Restore",
"description":"One Click Restore",
"host":"",
"size":0,
"uploaded_size":0,
"progress_percent":0,
"progress_msg":null,
"warning_msg":null,
"error_msg":null,
"time_taken":0,
"restore_options":{
"openstack":{
},
"type":"openstack",
"oneclickrestore":true,
"vmware":{
},
"restore_type":"oneclick"
},
"metadata":[
]
}
}

HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Mon, 09 Nov 2020 09:53:31 GMT
Content-Type: application/json
Content-Length: 1713
Connection: keep-alive
X-Compute-Request-Id: req-84f00d6f-1b12-47ec-b556-7b3ed4c2f1d7
{
"restore":{
"id":"778baae0-6c64-4eb1-8fa3-29324215c43c",
"created_at":"2020-11-09T09:53:31.037588",
"updated_at":"2020-11-09T09:53:31.037588",
"finished_at":null,
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"status":"restoring",
"restore_type":"restore",
"snapshot_id":"2e56d167-bad7-43c7-8ede-a613c3fe7844",
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/c76b3355a164498aa95ddbc960adc238/restores/778baae0-6c64-4eb1-8fa3-29324215c43c"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/c76b3355a164498aa95ddbc960adc238/restores/778baae0-6c64-4eb1-8fa3-29324215c43c"
}
],
"name":"API",
"description":"API Created",
"host":"",
"size":0,
"uploaded_size":0,
"progress_percent":0,
"progress_msg":null,
"warning_msg":null,
"error_msg":null,
"time_taken":0,
"restore_options":{
"openstack":{
"instances":[
{
"vdisks":[
{
"new_volume_type":"iscsi",
"id":"365ad75b-ca76-46cb-8eea-435535fd2e22",
"availability_zone":"nova"
}
],
"name":"cirros-1-selective",
"availability_zone":"nova",
"nics":[
],
"flavor":{
"vcpus":1,
"disk":1,
"swap":"",
"ram":512,
"ephemeral":0,
"id":"1"
},
"include":true,
"id":"e33c1eea-c533-4945-864d-0da1fc002070"
},
{
"include":false,
"id":"67d6a100-fee6-4aa5-83a1-66b070d2eabe"
}
],
"restore_topology":false,
"networks_mapping":{
"networks":[
{
"snapshot_network":{
"subnet":{
"id":"b7b54304-aa82-4d50-91e6-66445ab56db4"
},
"id":"5fb7027d-a2ac-4a21-9ee1-438c281d2b26"
},
"target_network":{
"subnet":{
"id":"b7b54304-aa82-4d50-91e6-66445ab56db4"
},
"id":"5fb7027d-a2ac-4a21-9ee1-438c281d2b26",
"name":"internal"
}
}
]
}
},
"restore_type":"selective",
"type":"openstack",
"oneclickrestore":false
},
"metadata":[
]
}
}

HTTP/1.1 202 Accepted
Server: nginx/1.16.1
Date: Mon, 09 Nov 2020 12:53:03 GMT
Content-Type: application/json
Content-Length: 1341
Connection: keep-alive
X-Compute-Request-Id: req-311fa97e-0fd7-41ed-873b-482c149ee743
{
"restore":{
"id":"0bf96f46-b27b-425c-a10f-a861cc18b82a",
"created_at":"2020-11-09T12:53:02.726757",
"updated_at":"2020-11-09T12:53:02.726757",
"finished_at":null,
"user_id":"ccddc7e7a015487fa02920f4d4979779",
"project_id":"c76b3355a164498aa95ddbc960adc238",
"status":"restoring",
"restore_type":"restore",
"snapshot_id":"ed4f29e8-7544-4e1c-af8a-a76031211926",
"links":[
{
"rel":"self",
"href":"http://wlm_backend/v1/c76b3355a164498aa95ddbc960adc238/restores/0bf96f46-b27b-425c-a10f-a861cc18b82a"
},
{
"rel":"bookmark",
"href":"http://wlm_backend/c76b3355a164498aa95ddbc960adc238/restores/0bf96f46-b27b-425c-a10f-a861cc18b82a"
}
],
"name":"API",
"description":"API description",
"host":"",
"size":0,
"uploaded_size":0,
"progress_percent":0,
"progress_msg":null,
"warning_msg":null,
"error_msg":null,
"time_taken":0,
"restore_options":{
"restore_type":"inplace",
"type":"openstack",
"oneclickrestore":false,
"openstack":{
"instances":[
{
"restore_boot_disk":true,
"include":true,
"id":"7c1bb5d2-aa5a-44f7-abcd-2d76b819b4c8",
"vdisks":[
{
"restore_cinder_volume":true,
"id":"f6b3fef6-4b0e-487e-84b5-47a14da716ca"
}
]
},
{
"restore_boot_disk":true,
"include":true,
"id":"08dab61c-6efd-44d3-a9ed-8e789d338c1b",
"vdisks":[
{
"restore_cinder_volume":true,
"id":"53204f34-019d-4ba8-ada1-e6ab7b8e5b43"
}
]
}
]
}
},
"metadata":[
]
}
}

{
"restore":{
"options":{
"openstack":{
},
"type":"openstack",
"oneclickrestore":true,
"vmware":{},
"restore_type":"oneclick"
},
"name":"One Click Restore",
"description":"One Click Restore"
}
}

{
"restore":{
"name":"<restore name>",
"description":"<restore description>",
"options":{
"openstack":{
"instances":[
{
"name":"<new name of instance>",
"include":<true/false>,
"id":"<original id of instance to be restored>"
"availability_zone":"<availability zone>",
"vdisks":[
{
"id":"<original ID of Volume>",
"new_volume_type":"<new volume type>",
"availability_zone":"<Volume availability zone>"
}
],
"nics":[
{
"mac_address":"<mac address of the pre-created port>",
"ip_address":"<IP of the pre-created port>",
"id":"<ID of the pre-created port>",
"network":{
"subnet":{
"id":"<ID of the subnet of the pre-created port>"
},
"id":"<ID of the network of the pre-created port>"
}
}
],
"flavor":{
"vcpus":<Integer>,
"disk":<Integer>,
"swap":<Integer>,
"ram":<Integer>,
"ephemeral":<Integer>,
"id":<Integer>
}
}
],
"restore_topology":<true/false>,
"networks_mapping":{
"networks":[
{
"snapshot_network":{
"subnet":{
"id":"<ID of the original Subnet ID>"
},
"id":"<ID of the original Network ID>"
},
"target_network":{
"subnet":{
"id":"<ID of the target Subnet ID>"
},
"id":"<ID of the target Network ID>",
"name":"<name of the target network>"
}
}
]
}
},
"restore_type":"selective",
"type":"openstack",
"oneclickrestore":false
}
}
}

{
"restore":{
"name":"<restore-name>",
"description":"<restore-description>",
"options":{
"restore_type":"inplace",
"type":"openstack",
"oneclickrestore":false,
"openstack":{
"instances":[
{
"restore_boot_disk":<Boolean>,
"include":<Boolean>,
"id":"<ID of the instance the volumes are attached to>",
"vdisks":[
{
"restore_cinder_volume":<boolean>,
"id":"<ID of the Volume to restore>"
}
]
}
]
}
}
}
}

# mount <NFS B2-IP/NFS B2-FQDN>:/<VOL-Path> /mnt

workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105

/…/workload_<id>/workload_db <<< Contains User ID and Project ID of Workload owner
/…/workload_<id>/workload_vms_db <<< Contains VM IDs and VM Names of all VMs actively protected by the Workload

# cp /mnt/workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105 /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW0=/workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105
# chown -R nova:nova /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW0=/workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105
# chmod -R 644 /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW0=/workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105

# qemu-img info bd57ec9b-c4ac-4a37-a4fd-5c9aa002c778
image: bd57ec9b-c4ac-4a37-a4fd-5c9aa002c778
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 516K
cluster_size: 65536
backing file: /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW0=/workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105/snapshot_1415095d-c047-400b-8b05-c88e57011263/vm_id_38b620f1-24ae-41d7-b0ab-85ffc2d7958b/vm_res_id_d4ab3431-5ce3-4a8f-a90b-07606e2ffa33_vda/7c39eb6a-6e42-418e-8690-b6368ecaa7bb
Format specific information:
compat: 1.1
lazy refcounts: false
refcount bits: 16
corrupt: false
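The directory name under /var/triliovault-mounts is simply the Base64 encoding of the NFS export string, which is how the backing file path above can be traced back to its share. As a quick sketch (the export path 10.10.2.20:/upstream below is only what the directory name in the backing file path decodes to, not necessarily your own share), the name can be reproduced or decoded with the base64 utility:

# echo -n 10.10.2.20:/upstream | base64
MTAuMTAuMi4yMDovdXBzdHJlYW0=
# echo -n MTAuMTAuMi4yMDovdXBzdHJlYW0= | base64 -d
10.10.2.20:/upstream

The same approach is used below to compute the directory names for the old and the new NFS export.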
# echo -n 10.10.2.20:/NFS_A1 | base64
MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl
# echo -n 10.20.3.22:/NFS_B2 | base64
MTAuMjAuMy4yMjovdXBzdHJlYW1fdGFyZ2V0

# mkdir /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl
# mount --bind /var/triliovault-mounts/MTAuMjAuMy4yMjovdXBzdHJlYW1fdGFyZ2V0/ /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl

# vi /etc/fstab
/var/triliovault-mounts/MTAuMjAuMy4yMjovdXBzdHJlYW1fdGFyZ2V0/ /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl none bind 0 0

# source {customer admin rc file}
# openstack role add Admin --user <my_admin_user> --user-domain <admin_domain> --domain <target_domain>
# openstack role add Admin --user <my_admin_user> --user-domain <admin_domain> --project <target_project> --project-domain <target_domain>
# openstack role add <Backup Trustee Role> --user <my_admin_user> --user-domain <admin_domain> --project <destination_project> --project-domain <target_domain>

# workloadmgr workload-get-orphaned-workloads-list --migrate_cloud True
+------------+--------------------------------------+----------------------------------+----------------------------------+
| Name | ID | Project ID | User ID |
+------------+--------------------------------------+----------------------------------+----------------------------------+
| Workload_1 | 6639525d-736a-40c5-8133-5caaddaaa8e9 | 4224d3acfd394cc08228cc8072861a35 | 329880dedb4cd357579a3279835f392 |
| Workload_2 | 904e72f7-27bb-4235-9b31-13a636eb9c95 | 637a9ce3fd0d404cabf1a776696c9c04 | 329880dedb4cd357579a3279835f392 |
+------------+--------------------------------------+----------------------------------+----------------------------------+

# openstack project list --domain <target_domain>
+----------------------------------+----------+
| ID | Name |
+----------------------------------+----------+
| 01fca51462a44bfa821130dce9baac1a | project1 |
| 33b4db1099ff4a65a4c1f69a14f932ee | project2 |
| 9139e694eb984a4a979b5ae8feb955af | project3 |
+----------------------------------+----------+

# openstack role assignment list --project <target_project> --project-domain <target_domain> --role <backup_trustee_role>
+----------------------------------+----------------------------------+-------+----------------------------------+--------+-----------+
| Role | User | Group | Project | Domain | Inherited |
+----------------------------------+----------------------------------+-------+----------------------------------+--------+-----------+
| 9fe2ff9ee4384b1894a90878d3e92bab | 72e65c264a694272928f5d84b73fe9ce | | 8e16700ae3614da4ba80a4e57d60cdb9 | | False |
| 9fe2ff9ee4384b1894a90878d3e92bab | d5fbd79f4e834f51bfec08be6d3b2ff2 | | 8e16700ae3614da4ba80a4e57d60cdb9 | | False |
| 9fe2ff9ee4384b1894a90878d3e92bab | f5b1d071816742fba6287d2c8ffcd6c4 | | 8e16700ae3614da4ba80a4e57d60cdb9 | | False |
+----------------------------------+----------------------------------+-------+----------------------------------+--------+-----------+

# workloadmgr workload-reassign-workloads --new_tenant_id {target_project_id} --user_id {target_user_id} --workload_ids {workload_id} --migrate_cloud True
+-----------+--------------------------------------+----------------------------------+----------------------------------+
| Name | ID | Project ID | User ID |
+-----------+--------------------------------------+----------------------------------+----------------------------------+
| project1 | 904e72f7-27bb-4235-9b31-13a636eb9c95 | 4f2a91274ce9491481db795dcb10b04f | 3e05cac47338425d827193ba374749cc |
+-----------+--------------------------------------+----------------------------------+----------------------------------+

# workloadmgr workload-show ac9cae9b-5e1b-4899-930c-6aa0600a2105
+-------------------+------------------------------------------------------------------------------------------------------+
| Property | Value |
+-------------------+------------------------------------------------------------------------------------------------------+
| availability_zone | nova |
| created_at | 2019-04-18T02:19:39.000000 |
| description | Test Linux VMs |
| error_msg | None |
| id | ac9cae9b-5e1b-4899-930c-6aa0600a2105 |
| instances | [{"id": "38b620f1-24ae-41d7-b0ab-85ffc2d7958b", "name": "Test-Linux-1"}, {"id": |
| | "3fd869b2-16bd-4423-b389-18d19d37c8e0", "name": "Test-Linux-2"}] |
| interval | None |
| jobschedule | True |
| name | Test Linux |
| project_id | 2fc4e2180c2745629753305591aeb93b |
| scheduler_trust | None |
| status | available |
| storage_usage | {"usage": 60555264, "full": {"usage": 44695552, "snap_count": 1}, "incremental": {"usage": 15859712, |
| | "snap_count": 13}} |
| updated_at | 2019-11-15T02:32:43.000000 |
| user_id | 72e65c264a694272928f5d84b73fe9ce |
| workload_type_id | f82ce76f-17fe-438b-aa37-7a023058e50d |
+-------------------+------------------------------------------------------------------------------------------------------+

# workloadmgr snapshot-list --workload_id ac9cae9b-5e1b-4899-930c-6aa0600a2105 --all True
+----------------------------+--------------+--------------------------------------+--------------------------------------+---------------+-----------+-----------+
| Created At | Name | ID | Workload ID | Snapshot Type | Status | Host |
+----------------------------+--------------+--------------------------------------+--------------------------------------+---------------+-----------+-----------+
| 2019-11-02T02:30:02.000000 | jobscheduler | f5b8c3fd-c289-487d-9d50-fe27a6561d78 | ac9cae9b-5e1b-4899-930c-6aa0600a2105 | full | available | Upstream2 |
| 2019-11-03T02:30:02.000000 | jobscheduler | 7e39e544-537d-4417-853d-11463e7396f9 | ac9cae9b-5e1b-4899-930c-6aa0600a2105 | incremental | available | Upstream2 |
| 2019-11-04T02:30:02.000000 | jobscheduler | 0c086f3f-fa5d-425f-b07e-a1adcdcafea9 | ac9cae9b-5e1b-4899-930c-6aa0600a2105 | incremental | available | Upstream2 |
+----------------------------+--------------+--------------------------------------+--------------------------------------+---------------+-----------+-----------+

# workloadmgr snapshot-show --output networks 7e39e544-537d-4417-853d-11463e7396f9
+-------------------+--------------------------------------+
| Snapshot property | Value |
+-------------------+--------------------------------------+
| description | None |
| host | Upstream2 |
| id | 7e39e544-537d-4417-853d-11463e7396f9 |
| name | jobscheduler |
| progress_percent | 100 |
| restore_size | 44040192 Bytes or Approx (42.0MB) |
| restores_info | |
| size | 1310720 Bytes or Approx (1.2MB) |
| snapshot_type | incremental |
| status | available |
| time_taken | 154 Seconds |
| uploaded_size | 1310720 |
| workload_id | ac9cae9b-5e1b-4899-930c-6aa0600a2105 |
+-------------------+--------------------------------------+
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Instances | Value |
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-1 |
| ID | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| | |
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-2 |
| ID | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| | |
+----------------+---------------------------------------------------------------------------------------------------------------------+
+-------------+----------------------------------------------------------------------------------------------------------------------------------------------+
| Networks | Value |
+-------------+----------------------------------------------------------------------------------------------------------------------------------------------+
| ip_address | 172.20.20.20 |
| vm_id | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| network | {u'subnet': {u'ip_version': 4, u'cidr': u'172.20.20.0/24', u'gateway_ip': u'172.20.20.1', u'id': u'3a756a89-d979-4cda-a7f3-dacad8594e44',
u'name': u'Trilio Test'}, u'cidr': None, u'id': u'5f0e5d34-569d-42c9-97c2-df944f3924b1', u'name': u'Trilio_Test_Internal', u'network_type': u'neutron'} |
| mac_address | fa:16:3e:74:58:bb |
| | |
| ip_address | 172.20.20.13 |
| vm_id | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| network | {u'subnet': {u'ip_version': 4, u'cidr': u'172.20.20.0/24', u'gateway_ip': u'172.20.20.1', u'id': u'3a756a89-d979-4cda-a7f3-dacad8594e44',
u'name': u'Trilio Test'}, u'cidr': None, u'id': u'5f0e5d34-569d-42c9-97c2-df944f3924b1', u'name': u'Trilio_Test_Internal', u'network_type': u'neutron'} |
| mac_address | fa:16:3e:6b:46:ae |
+-------------+----------------------------------------------------------------------------------------------------------------------------------------------+

[root@upstreamcontroller ~(keystone_admin)]# workloadmgr snapshot-show --output disks 7e39e544-537d-4417-853d-11463e7396f9
+-------------------+--------------------------------------+
| Snapshot property | Value |
+-------------------+--------------------------------------+
| description | None |
| host | Upstream2 |
| id | 7e39e544-537d-4417-853d-11463e7396f9 |
| name | jobscheduler |
| progress_percent | 100 |
| restore_size | 44040192 Bytes or Approx (42.0MB) |
| restores_info | |
| size | 1310720 Bytes or Approx (1.2MB) |
| snapshot_type | incremental |
| status | available |
| time_taken | 154 Seconds |
| uploaded_size | 1310720 |
| workload_id | ac9cae9b-5e1b-4899-930c-6aa0600a2105 |
+-------------------+--------------------------------------+
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Instances | Value |
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-1 |
| ID | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| | |
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-2 |
| ID | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| | |
+----------------+---------------------------------------------------------------------------------------------------------------------+
+-------------------+--------------------------------------------------+
| Vdisks | Value |
+-------------------+--------------------------------------------------+
| volume_mountpoint | /dev/vda |
| restore_size | 22020096 |
| resource_id | ebc2fdd0-3c4d-4548-b92d-0e16734b5d9a |
| volume_name | 0027b140-a427-46cb-9ccf-7895c7624493 |
| volume_type | None |
| label | None |
| volume_size | 1 |
| volume_id | 0027b140-a427-46cb-9ccf-7895c7624493 |
| availability_zone | nova |
| vm_id | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| metadata | {u'readonly': u'False', u'attached_mode': u'rw'} |
| | |
| volume_mountpoint | /dev/vda |
| restore_size | 22020096 |
| resource_id | 8007ed89-6a86-447e-badb-e49f1e92f57a |
| volume_name | 2a7f9e78-7778-4452-af5b-8e2fa43853bd |
| volume_type | None |
| label | None |
| volume_size | 1 |
| volume_id | 2a7f9e78-7778-4452-af5b-8e2fa43853bd |
| availability_zone | nova |
| vm_id | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| metadata | {u'readonly': u'False', u'attached_mode': u'rw'} |
| | |
+-------------------+--------------------------------------------------+

{
u'description':u'<description of the restore>',
u'oneclickrestore':False,
u'restore_type':u'selective',
u'type':u'openstack',
u'name':u'<name of the restore>',
u'openstack':{
u'instances':[
{
u'name':u'<name instance 1>',
u'availability_zone':u'<AZ instance 1>',
u'nics':[ #####Leave empty for network topology restore
],
u'vdisks':[
{
u'id':u'<old disk id>',
u'new_volume_type':u'<new volume type name>',
u'availability_zone':u'<new cinder volume AZ>'
}
],
u'flavor':{
u'ram':<RAM in MB>,
u'ephemeral':<GB of ephemeral disk>,
u'vcpus':<# vCPUs>,
u'swap':u'<GB of Swap disk>',
u'disk':<GB of boot disk>,
u'id':u'<id of the flavor to use>'
},
u'include':<True/False>,
u'id':u'<old id of the instance>'
} #####Repeat for each instance in the snapshot
],
u'restore_topology':<True/False>,
u'networks_mapping':{
u'networks':[ #####Leave empty for network topology restore
]
}
}
}
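For orientation only, a filled-in restore.json could look like the following sketch; the instance, volume, flavor, and network IDs are copied from the selective-restore API example earlier in this section, and the name and description are placeholders, so every value must be replaced with the IDs from your own snapshot before running the command below:

{
   "description":"Selective restore of cirros-1",
   "oneclickrestore":false,
   "restore_type":"selective",
   "type":"openstack",
   "name":"cirros-1-selective",
   "openstack":{
      "instances":[
         {
            "name":"cirros-1-selective",
            "availability_zone":"nova",
            "nics":[
            ],
            "vdisks":[
               {
                  "id":"365ad75b-ca76-46cb-8eea-435535fd2e22",
                  "new_volume_type":"iscsi",
                  "availability_zone":"nova"
               }
            ],
            "flavor":{
               "ram":512,
               "ephemeral":0,
               "vcpus":1,
               "swap":"",
               "disk":1,
               "id":"1"
            },
            "include":true,
            "id":"e33c1eea-c533-4945-864d-0da1fc002070"
         }
      ],
      "restore_topology":false,
      "networks_mapping":{
         "networks":[
            {
               "snapshot_network":{
                  "subnet":{
                     "id":"b7b54304-aa82-4d50-91e6-66445ab56db4"
                  },
                  "id":"5fb7027d-a2ac-4a21-9ee1-438c281d2b26"
               },
               "target_network":{
                  "subnet":{
                     "id":"b7b54304-aa82-4d50-91e6-66445ab56db4"
                  },
                  "id":"5fb7027d-a2ac-4a21-9ee1-438c281d2b26",
                  "name":"internal"
               }
            }
         ]
      }
   }
}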
# workloadmgr snapshot-selective-restore --filename restore.json {snapshot id}

[root@upstreamcontroller ~(keystone_admin)]# workloadmgr restore-list --snapshot_id 5928554d-a882-4881-9a5c-90e834c071af
+----------------------------+------------------+--------------------------------------+--------------------------------------+----------+-----------+
| Created At | Name | ID | Snapshot ID | Size | Status |
+----------------------------+------------------+--------------------------------------+--------------------------------------+----------+-----------+
| 2019-09-24T12:44:38.000000 | OneClick Restore | 5b4216d0-4bed-460f-8501-1589e7b45e01 | 5928554d-a882-4881-9a5c-90e834c071af | 41126400 | available |
+----------------------------+------------------+--------------------------------------+--------------------------------------+----------+-----------+
[root@upstreamcontroller ~(keystone_admin)]# workloadmgr restore-show 5b4216d0-4bed-460f-8501-1589e7b45e01
+------------------+------------------------------------------------------------------------------------------------------+
| Property | Value |
+------------------+------------------------------------------------------------------------------------------------------+
| created_at | 2019-09-24T12:44:38.000000 |
| description | - |
| error_msg | None |
| finished_at | 2019-09-24T12:46:07.000000 |
| host | Upstream2 |
| id | 5b4216d0-4bed-460f-8501-1589e7b45e01 |
| instances | [{"status": "available", "id": "b8506f04-1b99-4ca8-839b-6f5d2c20d9aa", "name": "temp", "metadata": |
| | {"instance_id": "c014a938-903d-43db-bfbb-ea4998ff1a0f", "production": "1", "config_drive": ""}}] |
| name | OneClick Restore |
| progress_msg | Restore from snapshot is complete |
| progress_percent | 100 |
| project_id | 8e16700ae3614da4ba80a4e57d60cdb9 |
| restore_options | {"description": "-", "oneclickrestore": true, "restore_type": "oneclick", "openstack": {"instances": |
| | [{"availability_zone": "US-West", "id": "c014a938-903d-43db-bfbb-ea4998ff1a0f", "name": "temp"}]}, |
| | "type": "openstack", "name": "OneClick Restore"} |
| restore_type | restore |
| size | 41126400 |
| snapshot_id | 5928554d-a882-4881-9a5c-90e834c071af |
| status | available |
| time_taken | 89 |
| updated_at | 2019-09-24T12:44:38.000000 |
| uploaded_size | 41126400 |
| user_id | d5fbd79f4e834f51bfec08be6d3b2ff2 |
| warning_msg | None |
| workload_id | 02b1aca2-c51a-454b-8c0f-99966314165e |
+------------------+------------------------------------------------------------------------------------------------------+

# workloadmgr workload-delete <workload_id>

# source {customer admin rc file}
# openstack role remove Admin --user <my_admin_user> --user-domain <admin_domain> --domain <target_domain>
# openstack role remove Admin --user <my_admin_user> --user-domain <admin_domain> --project <target_project> --project-domain <target_domain>
# openstack role remove <Backup Trustee Role> --user <my_admin_user> --user-domain <admin_domain> --project <destination_project> --project-domain <target_domain>
# vi /etc/workloadmgr/workloadmgr.conf

vault_storage_nfs_export = <NFS_B1/NFS_B1-FQDN>:/<VOL-B1-Path>

vault_storage_nfs_export = <NFS-IP/NFS-FQDN>:/<VOL-1-Path>,<NFS-IP/NFS-FQDN>:/<VOL-2-Path>

# systemctl restart wlm-workloads

# vi /etc/tvault-contego/tvault-contego.conf

vault_storage_nfs_export = <NFS_B1-IP/NFS_B1-FQDN>:/<VOL-B1-Path>

vault_storage_nfs_export = <NFS_B1-IP/NFS-FQDN>:/<VOL-B1-Path>,<NFS_B2-IP/NFS-FQDN>:/<VOL-B2-Path>

# systemctl restart tvault-contego

# qemu-img info bd57ec9b-c4ac-4a37-a4fd-5c9aa002c778
image: bd57ec9b-c4ac-4a37-a4fd-5c9aa002c778
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 516K
cluster_size: 65536
backing file: /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW0=/workload_ac9cae9b-5e1b-4899-930c-6aa0600a2105/snapshot_1415095d-c047-400b-8b05-c88e57011263/vm_id_38b620f1-24ae-41d7-b0ab-85ffc2d7958b/vm_res_id_d4ab3431-5ce3-4a8f-a90b-07606e2ffa33_vda/7c39eb6a-6e42-418e-8690-b6368ecaa7bb
Format specific information:
compat: 1.1
lazy refcounts: false
refcount bits: 16
corrupt: false
# echo -n 10.10.2.20:/NFS_A1 | base64
MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl
# echo -n 10.20.3.22:/NFS_B2 | base64
MTAuMjAuMy4yMjovdXBzdHJlYW1fdGFyZ2V0

# mkdir /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl
# mount --bind /var/triliovault-mounts/MTAuMjAuMy4yMjovdXBzdHJlYW1fdGFyZ2V0/ /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl

# vi /etc/fstab
/var/triliovault-mounts/MTAuMjAuMy4yMjovdXBzdHJlYW1fdGFyZ2V0/ /var/triliovault-mounts/MTAuMTAuMi4yMDovdXBzdHJlYW1fc291cmNl none bind 0 0

# source {customer admin rc file}
# openstack role add Admin --user <my_admin_user> --user-domain <admin_domain> --domain <target_domain>
# openstack role add Admin --user <my_admin_user> --user-domain <admin_domain> --project <target_project> --project-domain <target_domain>
# openstack role add <Backup Trustee Role> --user <my_admin_user> --user-domain <admin_domain> --project <destination_project> --project-domain <target_domain>

# workloadmgr workload-get-orphaned-workloads-list --migrate_cloud True
+------------+--------------------------------------+----------------------------------+----------------------------------+
| Name | ID | Project ID | User ID |
+------------+--------------------------------------+----------------------------------+----------------------------------+
| Workload_1 | 6639525d-736a-40c5-8133-5caaddaaa8e9 | 4224d3acfd394cc08228cc8072861a35 | 329880dedb4cd357579a3279835f392 |
| Workload_2 | 904e72f7-27bb-4235-9b31-13a636eb9c95 | 637a9ce3fd0d404cabf1a776696c9c04 | 329880dedb4cd357579a3279835f392 |
+------------+--------------------------------------+----------------------------------+----------------------------------+

# openstack project list --domain <target_domain>
+----------------------------------+----------+
| ID | Name |
+----------------------------------+----------+
| 01fca51462a44bfa821130dce9baac1a | project1 |
| 33b4db1099ff4a65a4c1f69a14f932ee | project2 |
| 9139e694eb984a4a979b5ae8feb955af | project3 |
+----------------------------------+----------+

# openstack role assignment list --project <target_project> --project-domain <target_domain> --role <backup_trustee_role>
+----------------------------------+----------------------------------+-------+----------------------------------+--------+-----------+
| Role | User | Group | Project | Domain | Inherited |
+----------------------------------+----------------------------------+-------+----------------------------------+--------+-----------+
| 9fe2ff9ee4384b1894a90878d3e92bab | 72e65c264a694272928f5d84b73fe9ce | | 8e16700ae3614da4ba80a4e57d60cdb9 | | False |
| 9fe2ff9ee4384b1894a90878d3e92bab | d5fbd79f4e834f51bfec08be6d3b2ff2 | | 8e16700ae3614da4ba80a4e57d60cdb9 | | False |
| 9fe2ff9ee4384b1894a90878d3e92bab | f5b1d071816742fba6287d2c8ffcd6c4 | | 8e16700ae3614da4ba80a4e57d60cdb9 | | False |
+----------------------------------+----------------------------------+-------+----------------------------------+--------+-----------+

# workloadmgr workload-reassign-workloads --new_tenant_id {target_project_id} --user_id {target_user_id} --workload_ids {workload_id} --migrate_cloud True
+-----------+--------------------------------------+----------------------------------+----------------------------------+
| Name | ID | Project ID | User ID |
+-----------+--------------------------------------+----------------------------------+----------------------------------+
| project1 | 904e72f7-27bb-4235-9b31-13a636eb9c95 | 4f2a91274ce9491481db795dcb10b04f | 3e05cac47338425d827193ba374749cc |
+-----------+--------------------------------------+----------------------------------+----------------------------------+

# workloadmgr workload-show ac9cae9b-5e1b-4899-930c-6aa0600a2105
+-------------------+------------------------------------------------------------------------------------------------------+
| Property | Value |
+-------------------+------------------------------------------------------------------------------------------------------+
| availability_zone | nova |
| created_at | 2019-04-18T02:19:39.000000 |
| description | Test Linux VMs |
| error_msg | None |
| id | ac9cae9b-5e1b-4899-930c-6aa0600a2105 |
| instances | [{"id": "38b620f1-24ae-41d7-b0ab-85ffc2d7958b", "name": "Test-Linux-1"}, {"id": |
| | "3fd869b2-16bd-4423-b389-18d19d37c8e0", "name": "Test-Linux-2"}] |
| interval | None |
| jobschedule | True |
| name | Test Linux |
| project_id | 2fc4e2180c2745629753305591aeb93b |
| scheduler_trust | None |
| status | available |
| storage_usage | {"usage": 60555264, "full": {"usage": 44695552, "snap_count": 1}, "incremental": {"usage": 15859712, |
| | "snap_count": 13}} |
| updated_at | 2019-11-15T02:32:43.000000 |
| user_id | 72e65c264a694272928f5d84b73fe9ce |
| workload_type_id | f82ce76f-17fe-438b-aa37-7a023058e50d |
+-------------------+------------------------------------------------------------------------------------------------------+

# workloadmgr snapshot-list --workload_id ac9cae9b-5e1b-4899-930c-6aa0600a2105 --all True
+----------------------------+--------------+--------------------------------------+--------------------------------------+---------------+-----------+-----------+
| Created At | Name | ID | Workload ID | Snapshot Type | Status | Host |
+----------------------------+--------------+--------------------------------------+--------------------------------------+---------------+-----------+-----------+
| 2019-11-02T02:30:02.000000 | jobscheduler | f5b8c3fd-c289-487d-9d50-fe27a6561d78 | ac9cae9b-5e1b-4899-930c-6aa0600a2105 | full | available | Upstream2 |
| 2019-11-03T02:30:02.000000 | jobscheduler | 7e39e544-537d-4417-853d-11463e7396f9 | ac9cae9b-5e1b-4899-930c-6aa0600a2105 | incremental | available | Upstream2 |
| 2019-11-04T02:30:02.000000 | jobscheduler | 0c086f3f-fa5d-425f-b07e-a1adcdcafea9 | ac9cae9b-5e1b-4899-930c-6aa0600a2105 | incremental | available | Upstream2 |
+----------------------------+--------------+--------------------------------------+--------------------------------------+---------------+-----------+-----------+

# workloadmgr snapshot-show --output networks 7e39e544-537d-4417-853d-11463e7396f9
+-------------------+--------------------------------------+
| Snapshot property | Value |
+-------------------+--------------------------------------+
| description | None |
| host | Upstream2 |
| id | 7e39e544-537d-4417-853d-11463e7396f9 |
| name | jobscheduler |
| progress_percent | 100 |
| restore_size | 44040192 Bytes or Approx (42.0MB) |
| restores_info | |
| size | 1310720 Bytes or Approx (1.2MB) |
| snapshot_type | incremental |
| status | available |
| time_taken | 154 Seconds |
| uploaded_size | 1310720 |
| workload_id | ac9cae9b-5e1b-4899-930c-6aa0600a2105 |
+-------------------+--------------------------------------+
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Instances | Value |
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-1 |
| ID | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| | |
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-2 |
| ID | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| | |
+----------------+---------------------------------------------------------------------------------------------------------------------+
+-------------+----------------------------------------------------------------------------------------------------------------------------------------------+
| Networks | Value |
+-------------+----------------------------------------------------------------------------------------------------------------------------------------------+
| ip_address | 172.20.20.20 |
| vm_id | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| network | {u'subnet': {u'ip_version': 4, u'cidr': u'172.20.20.0/24', u'gateway_ip': u'172.20.20.1', u'id': u'3a756a89-d979-4cda-a7f3-dacad8594e44',
u'name': u'Trilio Test'}, u'cidr': None, u'id': u'5f0e5d34-569d-42c9-97c2-df944f3924b1', u'name': u'Trilio_Test_Internal', u'network_type': u'neutron'} |
| mac_address | fa:16:3e:74:58:bb |
| | |
| ip_address | 172.20.20.13 |
| vm_id | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| network | {u'subnet': {u'ip_version': 4, u'cidr': u'172.20.20.0/24', u'gateway_ip': u'172.20.20.1', u'id': u'3a756a89-d979-4cda-a7f3-dacad8594e44',
u'name': u'Trilio Test'}, u'cidr': None, u'id': u'5f0e5d34-569d-42c9-97c2-df944f3924b1', u'name': u'Trilio_Test_Internal', u'network_type': u'neutron'} |
| mac_address | fa:16:3e:6b:46:ae |
+-------------+----------------------------------------------------------------------------------------------------------------------------------------------+

[root@upstreamcontroller ~(keystone_admin)]# workloadmgr snapshot-show --output disks 7e39e544-537d-4417-853d-11463e7396f9
+-------------------+--------------------------------------+
| Snapshot property | Value |
+-------------------+--------------------------------------+
| description | None |
| host | Upstream2 |
| id | 7e39e544-537d-4417-853d-11463e7396f9 |
| name | jobscheduler |
| progress_percent | 100 |
| restore_size | 44040192 Bytes or Approx (42.0MB) |
| restores_info | |
| size | 1310720 Bytes or Approx (1.2MB) |
| snapshot_type | incremental |
| status | available |
| time_taken | 154 Seconds |
| uploaded_size | 1310720 |
| workload_id | ac9cae9b-5e1b-4899-930c-6aa0600a2105 |
+-------------------+--------------------------------------+
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Instances | Value |
+----------------+---------------------------------------------------------------------------------------------------------------------+
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-1 |
| ID | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| | |
| Status | available |
| Security Group | [{u'name': u'Test', u'security_group_type': u'neutron'}, {u'name': u'default', u'security_group_type': u'neutron'}] |
| Flavor | {u'ephemeral': u'0', u'vcpus': u'1', u'disk': u'1', u'ram': u'512'} |
| Name | Test-Linux-2 |
| ID | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| | |
+----------------+---------------------------------------------------------------------------------------------------------------------+
+-------------------+--------------------------------------------------+
| Vdisks | Value |
+-------------------+--------------------------------------------------+
| volume_mountpoint | /dev/vda |
| restore_size | 22020096 |
| resource_id | ebc2fdd0-3c4d-4548-b92d-0e16734b5d9a |
| volume_name | 0027b140-a427-46cb-9ccf-7895c7624493 |
| volume_type | None |
| label | None |
| volume_size | 1 |
| volume_id | 0027b140-a427-46cb-9ccf-7895c7624493 |
| availability_zone | nova |
| vm_id | 38b620f1-24ae-41d7-b0ab-85ffc2d7958b |
| metadata | {u'readonly': u'False', u'attached_mode': u'rw'} |
| | |
| volume_mountpoint | /dev/vda |
| restore_size | 22020096 |
| resource_id | 8007ed89-6a86-447e-badb-e49f1e92f57a |
| volume_name | 2a7f9e78-7778-4452-af5b-8e2fa43853bd |
| volume_type | None |
| label | None |
| volume_size | 1 |
| volume_id | 2a7f9e78-7778-4452-af5b-8e2fa43853bd |
| availability_zone | nova |
| vm_id | 3fd869b2-16bd-4423-b389-18d19d37c8e0 |
| metadata | {u'readonly': u'False', u'attached_mode': u'rw'} |
| | |
+-------------------+--------------------------------------------------+

{
u'description':u'<description of the restore>',
u'oneclickrestore':False,
u'restore_type':u'selective',
u'type':u'openstack',
u'name':u'<name of the restore>',
u'openstack':{
u'instances':[
{
u'name':u'<name instance 1>',
u'availability_zone':u'<AZ instance 1>',
u'nics':[ #####Leave empty for network topology restore
],
u'vdisks':[
{
u'id':u'<old disk id>',
u'new_volume_type':u'<new volume type name>',
u'availability_zone':u'<new cinder volume AZ>'
}
],
u'flavor':{
u'ram':<RAM in MB>,
u'ephemeral':<GB of ephemeral disk>,
u'vcpus':<# vCPUs>,
u'swap':u'<GB of Swap disk>',
u'disk':<GB of boot disk>,
u'id':u'<id of the flavor to use>'
},
u'include':<True/False>,
u'id':u'<old id of the instance>'
} #####Repeat for each instance in the snapshot
],
u'restore_topology':<True/False>,
u'networks_mapping':{
u'networks':[ #####Leave empty for network topology restore
]
}
}
}
# workloadmgr snapshot-selective-restore --filename restore.json {snapshot id}

[root@upstreamcontroller ~(keystone_admin)]# workloadmgr restore-list --snapshot_id 5928554d-a882-4881-9a5c-90e834c071af
+----------------------------+------------------+--------------------------------------+--------------------------------------+----------+-----------+
| Created At | Name | ID | Snapshot ID | Size | Status |
+----------------------------+------------------+--------------------------------------+--------------------------------------+----------+-----------+
| 2019-09-24T12:44:38.000000 | OneClick Restore | 5b4216d0-4bed-460f-8501-1589e7b45e01 | 5928554d-a882-4881-9a5c-90e834c071af | 41126400 | available |
+----------------------------+------------------+--------------------------------------+--------------------------------------+----------+-----------+
[root@upstreamcontroller ~(keystone_admin)]# workloadmgr restore-show 5b4216d0-4bed-460f-8501-1589e7b45e01
+------------------+------------------------------------------------------------------------------------------------------+
| Property | Value |
+------------------+------------------------------------------------------------------------------------------------------+
| created_at | 2019-09-24T12:44:38.000000 |
| description | - |
| error_msg | None |
| finished_at | 2019-09-24T12:46:07.000000 |
| host | Upstream2 |
| id | 5b4216d0-4bed-460f-8501-1589e7b45e01 |
| instances | [{"status": "available", "id": "b8506f04-1b99-4ca8-839b-6f5d2c20d9aa", "name": "temp", "metadata": |
| | {"instance_id": "c014a938-903d-43db-bfbb-ea4998ff1a0f", "production": "1", "config_drive": ""}}] |
| name | OneClick Restore |
| progress_msg | Restore from snapshot is complete |
| progress_percent | 100 |
| project_id | 8e16700ae3614da4ba80a4e57d60cdb9 |
| restore_options | {"description": "-", "oneclickrestore": true, "restore_type": "oneclick", "openstack": {"instances": |
| | [{"availability_zone": "US-West", "id": "c014a938-903d-43db-bfbb-ea4998ff1a0f", "name": "temp"}]}, |
| | "type": "openstack", "name": "OneClick Restore"} |
| restore_type | restore |
| size | 41126400 |
| snapshot_id | 5928554d-a882-4881-9a5c-90e834c071af |
| status | available |
| time_taken | 89 |
| updated_at | 2019-09-24T12:44:38.000000 |
| uploaded_size | 41126400 |
| user_id | d5fbd79f4e834f51bfec08be6d3b2ff2 |
| warning_msg | None |
| workload_id | 02b1aca2-c51a-454b-8c0f-99966314165e |
+------------------+------------------------------------------------------------------------------------------------------+

# vi /etc/workloadmgr/workloadmgr.conf

vault_storage_nfs_export = <NFS_B1-IP/NFS-FQDN>:/<VOL-B1-Path>,<NFS_B2-IP/NFS-FQDN>:/<VOL-B2-Path>

vault_storage_nfs_export = <NFS_B1-IP/NFS_B1-FQDN>:/<VOL-B1-Path>

# systemctl restart wlm-workloads

# vi /etc/tvault-contego/tvault-contego.conf

vault_storage_nfs_export = <NFS_B1-IP/NFS-FQDN>:/<VOL-B1-Path>,<NFS_B2-IP/NFS-FQDN>:/<VOL-B2-Path>

vault_storage_nfs_export = <NFS-IP/NFS-FQDN>:/<VOL-1-Path>

# systemctl restart tvault-contego

# source {customer admin rc file}
# openstack role remove Admin --user <my_admin_user> --user-domain <admin_domain> --domain <target_domain>
# openstack role remove Admin --user <my_admin_user> --user-domain <admin_domain> --project <target_project> --project-domain <target_domain>
# openstack role remove <Backup Trustee Role> --user <my_admin_user> --user-domain <admin_domain> --project <destination_project> --project-domain <target_domain>
