Generate rook storage nodes from ceph-resource nodes and add flexibility to Helm values overall #1609

Open · wants to merge 4 commits into base: main
77 changes: 49 additions & 28 deletions roles/rook/defaults/main.yml
@@ -156,7 +156,7 @@ rook_placement_cephobjectstore:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "node-role.osism.tech/{{ rook_placement_label_mds }}"
- key: "node-role.osism.tech/{{ rook_placement_label_rgw }}"
operator: In
values:
- "true"
@@ -174,7 +174,7 @@ rook_placement_cephfilesystem:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: "node-role.osism.tech/{{ rook_placement_label_rgw }}"
- key: "node-role.osism.tech/{{ rook_placement_label_mds }}"
operator: In
values:
- "true"
@@ -299,6 +299,27 @@ rook_cephconfig: {}
# "osd.*":
# osd_max_scrubs: "10"

rook_mon:
# Set the number of mons to be started. Generally recommended to be 3.
# For highest availability, an odd number of mons should be specified.
count: "{{ rook_mon_count }}"
# The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
# Mons should only be allowed on the same node for test environments where data loss is acceptable.
allowMultiplePerNode: false

rook_mgr:
# When higher availability of the mgr is needed, increase the count to 2.
# In that case, one mgr will be active and one in standby. When Ceph updates which
# mgr is active, Rook will update the mgr services to match the active mgr.
count: "{{ rook_mgr_count }}"
allowMultiplePerNode: false
modules:
# List of modules to optionally enable or disable.
# Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
# - name: rook
# enabled: true
"{{ rook_mgr_modules }}"

##############################################
###
### Storage Configuration
@@ -316,35 +337,35 @@ rook_storage_config_encrypteddevice: "true"
# define a device filter where to create OSDs
rook_storage_devicefilter: ""
# name nodes where to create OSDs
rook_storage_nodes: []
# rook_storage_nodes: [] #TODO: deprecate?
# - name: "testbed-node-0"
# - name: "testbed-node-1"
# - name: "testbed-node-2"
rook_storage:
useAllNodes: "{{ rook_storage_useallnodes }}"
useAllDevices: "{{ rook_storage_usealldevices }}"
# deviceFilter:
config:
# crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
# metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
# databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
osdsPerDevice: "{{ rook_storage_config_osdsperdevice }}" # this value can be overridden at the node or device level
encryptedDevice: "{{ rook_storage_config_encrypteddevice }}" # the default value for this option is "false"
# # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
# # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
# nodes:
# - name: "172.17.4.201"
# devices: # specific devices to use for storage can be specified for each node
# - name: "sdb"
# - name: "nvme01" # multiple osds can be created on high performance devices
# config:
# osdsPerDevice: "5"
# - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
# config: # configuration can be specified at the node level which overrides the cluster level config
# - name: "172.17.4.301"
# deviceFilter: "^sd."
deviceFilter: "{{ rook_storage_devicefilter }}"
nodes: "{{ rook_storage_nodes }}"
# rook_storage:
# useAllNodes: "{{ rook_storage_useallnodes }}"
# useAllDevices: "{{ rook_storage_usealldevices }}"
# # deviceFilter:
# config:
# # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
# # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
# # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
# osdsPerDevice: "{{ rook_storage_config_osdsperdevice }}" # this value can be overridden at the node or device level
# encryptedDevice: "{{ rook_storage_config_encrypteddevice }}" # the default value for this option is "false"
# # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
# # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
# # nodes:
# # - name: "172.17.4.201"
# # devices: # specific devices to use for storage can be specified for each node
# # - name: "sdb"
# # - name: "nvme01" # multiple osds can be created on high performance devices
# # config:
# # osdsPerDevice: "5"
# # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
# # config: # configuration can be specified at the node level which overrides the cluster level config
# # - name: "172.17.4.301"
# # deviceFilter: "^sd."
# deviceFilter: "{{ rook_storage_devicefilter }}"
# nodes: "{{ rook_storage_nodes }}"

##############################################
###
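
The new rook_mon and rook_mgr defaults above are plain dictionaries, so an operator can now override them as a whole instead of only tuning the individual count variables. A minimal sketch of such an override, assuming an OSISM-style configuration file (the path environments/rook/configuration.yml and the pg_autoscaler module name are illustrative assumptions, not part of this change):

# environments/rook/configuration.yml (path is an assumption)
rook_mon:
  count: 5
  allowMultiplePerNode: false

rook_mgr:
  count: 2
  allowMultiplePerNode: false
  modules:
    - name: pg_autoscaler   # illustrative module name
      enabled: true
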
48 changes: 32 additions & 16 deletions roles/rook/templates/01-helm-values-all.yml.j2
@@ -123,25 +123,27 @@ cephClusterSpec:
upgradeOSDRequiresHealthyPGs: false

mon:
{{ rook_mon }}
# Set the number of mons to be started. Generally recommended to be 3.
# For highest availability, an odd number of mons should be specified.
count: {{ rook_mon_count }}
# count: {{ rook_mon_count }}
# The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
# Mons should only be allowed on the same node for test environments where data loss is acceptable.
allowMultiplePerNode: false
# allowMultiplePerNode: false

mgr:
{{ rook_mgr }}
# When higher availability of the mgr is needed, increase the count to 2.
# In that case, one mgr will be active and one in standby. When Ceph updates which
# mgr is active, Rook will update the mgr services to match the active mgr.
count: {{ rook_mgr_count }}
allowMultiplePerNode: false
modules:
# count: {{ rook_mgr_count }}
# allowMultiplePerNode: false
#modules:
# List of modules to optionally enable or disable.
# Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
# - name: rook
# enabled: true
{{ rook_mgr_modules }}
# {{ rook_mgr_modules }}

# enable the ceph dashboard for viewing cluster status
dashboard:
@@ -355,16 +357,19 @@ cephClusterSpec:

# cluster level storage configuration and selection
storage:
{% if rook_storage %}
{{ rook_storage }}
# useAllNodes: false
# useAllDevices: false
{% else %}
# otherwise use the following values
useAllNodes: {{ rook_storage_useallnodes }}
useAllDevices: {{ rook_storage_usealldevices }}
# deviceFilter:
# config:
# crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
# metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
# databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
# osdsPerDevice: "1" # this value can be overridden at the node or device level
# encryptedDevice: "true" # the default value for this option is "false"
config:
# crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
# metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
# databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
osdsPerDevice: "{{ rook_storage_config_osdsperdevice }}" # this value can be overridden at the node or device level
encryptedDevice: "{{ rook_storage_config_encrypteddevice }}" # the default value for this option is "false"
# # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
# # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
# nodes:
@@ -378,8 +383,19 @@ cephClusterSpec:
# config: # configuration can be specified at the node level which overrides the cluster level config
# - name: "172.17.4.301"
# deviceFilter: "^sd."
# deviceFilter: "^sd."
# nodes: []
deviceFilter: "{{ rook_storage_devicefilter }}"
{% if rook_storage_nodes is defined %}
nodes: "{{ rook_storage_nodes }}"
{% else %}
nodes:
{% for host in groups['ceph-resource'] %}
- name: "{{ hostvars[host]['ansible_hostname'] }}"
{% if hostvars[host]['rook_storage_devicefilter'] is defined %}
deviceFilter: "{{ hostvars[host]['rook_storage_devicefilter'] }}"
{% endif %}
{% endfor %}
{% endif %}
{% endif %}

# The section for configuring management of daemon disruptions during upgrade or fencing.
disruptionManagement:
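
For reference, a sketch of what the new node-generation branch in the storage section is intended to render when neither rook_storage nor rook_storage_nodes is defined, assuming a ceph-resource group containing testbed-node-0 and testbed-node-1 and a rook_storage_devicefilter set via host_vars only on the second node (hostnames and the filter value are illustrative):

# host_vars/testbed-node-1 (illustrative)
rook_storage_devicefilter: "^sd[b-d]"

# resulting nodes fragment under cephClusterSpec.storage (sketch)
nodes:
  - name: "testbed-node-0"
  - name: "testbed-node-1"
    deviceFilter: "^sd[b-d]"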