From 5556aa1e2560f7273ddb1a9eec4e3a817e37e7ad Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 14:04:11 +0100 Subject: [PATCH 01/39] PMM-13992: New Percona Server for MySQL setup with cgroups support. --- .../data/my-async-replication-57.cnf.j2 | 38 ++++ .../data/my-async-replication.cnf.j2 | 42 +++++ .../data/my-group-replication-57.cnf.j2 | 49 +++++ .../data/my-group-replication.cnf.j2 | 45 +++++ .../percona_server_for_mysql/data/my.cnf.j2 | 3 + .../percona-server-setup.yml | 173 ++++++++++++++++++ ...percona-server-async-replication-setup.yml | 81 ++++++++ ...percona-server-group-replication-setup.yml | 120 ++++++++++++ .../tasks/percona-server-setup-single.yml | 8 + .../tasks/prepare_install_ps.yml | 72 ++++++++ pmm_qa/pmm-framework.py | 48 ++--- pmm_qa/scripts/database_options.py | 2 +- 12 files changed, 644 insertions(+), 37 deletions(-) create mode 100644 pmm_qa/percona_server_for_mysql/data/my-async-replication-57.cnf.j2 create mode 100644 pmm_qa/percona_server_for_mysql/data/my-async-replication.cnf.j2 create mode 100644 pmm_qa/percona_server_for_mysql/data/my-group-replication-57.cnf.j2 create mode 100644 pmm_qa/percona_server_for_mysql/data/my-group-replication.cnf.j2 create mode 100644 pmm_qa/percona_server_for_mysql/data/my.cnf.j2 create mode 100644 pmm_qa/percona_server_for_mysql/percona-server-setup.yml create mode 100644 pmm_qa/percona_server_for_mysql/tasks/percona-server-async-replication-setup.yml create mode 100644 pmm_qa/percona_server_for_mysql/tasks/percona-server-group-replication-setup.yml create mode 100644 pmm_qa/percona_server_for_mysql/tasks/percona-server-setup-single.yml create mode 100644 pmm_qa/percona_server_for_mysql/tasks/prepare_install_ps.yml diff --git a/pmm_qa/percona_server_for_mysql/data/my-async-replication-57.cnf.j2 b/pmm_qa/percona_server_for_mysql/data/my-async-replication-57.cnf.j2 new file mode 100644 index 00000000..b2f96dc5 --- /dev/null +++ b/pmm_qa/percona_server_for_mysql/data/my-async-replication-57.cnf.j2 @@ -0,0 +1,38 @@ +[mysqld] +# General server configuration +server_id={{ item }} +bind-address=0.0.0.0 +port={{ mysql_listen_port }} +userstat=1 + +# Replication settings +gtid_mode=ON +enforce_gtid_consistency=ON +log_bin=binlog +log_slave_updates=ON +sync_binlog=1 +binlog_checksum=NONE +# Only disable engines supported in 5.7 and safe for GTID +disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE" +# MacOS-specific, where table names are case-sensitive +lower_case_table_names=2 + +# Optional: report_host is valid in 5.7 +report_host={{ container_prefix }}{{ item }} + +# Replica configuration - applies to all nodes except primary (they'll be able to become replicas) +{% if item != 1 %} +# Replica specific settings (use slave_parallel_* in 5.7) +slave_parallel_workers=4 +slave_parallel_type=LOGICAL_CLOCK +slave_preserve_commit_order=1 +{% endif %} + +# Crash-safe replication settings +relay-log={{ container_prefix }}{{ item }}-relay-bin +relay_log_recovery=ON +relay_log_purge=ON + +# Performance and connection settings +max_connections=1000 +innodb_buffer_pool_size=256M diff --git a/pmm_qa/percona_server_for_mysql/data/my-async-replication.cnf.j2 b/pmm_qa/percona_server_for_mysql/data/my-async-replication.cnf.j2 new file mode 100644 index 00000000..efce584c --- /dev/null +++ b/pmm_qa/percona_server_for_mysql/data/my-async-replication.cnf.j2 @@ -0,0 +1,42 @@ +[mysqld] +# General server configuration +server_id={{ item }} +bind-address=0.0.0.0 +port={{ mysql_listen_port }} + +# Authentication settings for 
caching_sha2_password +caching_sha2_password_auto_generate_rsa_keys=ON +# The following two parameters tell MySQL where to store the RSA key pair +caching_sha2_password_private_key_path=private_key.pem +caching_sha2_password_public_key_path=public_key.pem + +# Replication settings +gtid_mode=ON +enforce_gtid_consistency=ON +log_bin=binlog +log_replica_updates=ON +sync_binlog=1 +binlog_checksum=NONE +disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY" +# MacOS-specific, where table names are case-sensitive +lower_case_table_names=2 + +# MySQL 8.4 compatibility settings +report_host={{ container_prefix }}{{ item }} + +# Replica configuration - applies to all nodes except primary (they'll be able to become replicas) +{% if item != 1 %} +# Replica specific settings +replica_parallel_workers=4 +replica_parallel_type=LOGICAL_CLOCK +replica_preserve_commit_order=1 +{% endif %} + +# Crash-safe replication settings +relay-log={{ container_prefix }}{{ item }}-relay-bin +relay_log_recovery=ON +relay_log_purge=ON + +# Performance and connection settings +max_connections=1000 +innodb_buffer_pool_size=256M diff --git a/pmm_qa/percona_server_for_mysql/data/my-group-replication-57.cnf.j2 b/pmm_qa/percona_server_for_mysql/data/my-group-replication-57.cnf.j2 new file mode 100644 index 00000000..74383253 --- /dev/null +++ b/pmm_qa/percona_server_for_mysql/data/my-group-replication-57.cnf.j2 @@ -0,0 +1,49 @@ +[mysqld] +# General server configuration +server_id={{ server_id_start + item - 1 }} +binlog_format=ROW +bind-address=0.0.0.0 +port={{ mysql_listen_port }} +userstat=1 + +# 5.7 General replication settings +gtid_mode=ON +enforce_gtid_consistency=ON +master_info_repository=TABLE +relay_log_info_repository=TABLE +transaction_write_set_extraction=XXHASH64 +binlog_checksum=NONE +log_bin=binlog +log_slave_updates=ON +# NO: disabled_storage_engines in 5.7 +lower_case_table_names=2 + +# Report host for replication/monitoring +report_host={{ container_prefix }}{{ item }} + +# Group Replication Settings +# (Available in MySQL 5.7.17+ and must be installed as plugin) +plugin_load_add='group_replication.so' +loose-group_replication_group_name='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' +loose-group_replication_local_address='{{ container_prefix }}{{ item }}:{{ group_seeds_port }}' +loose-group_replication_group_seeds={% for i in range(1, nodes_count | int + 1) %}{{ container_prefix }}{{ i }}:{{ group_seeds_port }}{% if not loop.last %},{% endif %}{% endfor %} +loose-group_replication_communication_stack=XCOM + +# Group replication behavior +loose-group_replication_start_on_boot=OFF +loose-group_replication_bootstrap_group=OFF +loose-group_replication_single_primary_mode=ON +loose-group_replication_enforce_update_everywhere_checks=OFF + +# Recovery settings +loose-group_replication_recovery_retry_count=10 +loose-group_replication_recovery_reconnect_interval=60 + +# Crash-safe replication settings +relay-log={{ container_prefix }}{{ item }}-relay-bin +relay_log_recovery=ON +relay_log_purge=ON + +# Performance and connection settings +max_connections=1000 +innodb_buffer_pool_size=256M diff --git a/pmm_qa/percona_server_for_mysql/data/my-group-replication.cnf.j2 b/pmm_qa/percona_server_for_mysql/data/my-group-replication.cnf.j2 new file mode 100644 index 00000000..df2434f9 --- /dev/null +++ b/pmm_qa/percona_server_for_mysql/data/my-group-replication.cnf.j2 @@ -0,0 +1,45 @@ +[mysqld] +# General server configuration +server_id={{ server_id_start + item - 1 }} +bind-address=0.0.0.0 +port={{ mysql_listen_port 
}} +userstat=1 + +# General replication settings +gtid_mode=ON +enforce_gtid_consistency=ON +binlog_checksum=NONE +log_bin=binlog +log_replica_updates=ON +disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY" +lower_case_table_names=2 # MacOS-specific, but also good generally + +# MySQL 8.4 compatibility settings +report_host={{ container_prefix }}{{ item }} + +# Group Replication Settings +plugin_load_add='group_replication.so' +loose-group_replication_group_name='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' +loose-group_replication_local_address='{{ container_prefix }}{{ item }}:{{ group_seeds_port }}' +loose-group_replication_group_seeds='{% for i in range(1, nodes_count | int + 1) %}{{ container_prefix }}{{ i }}:{{ group_seeds_port }}{% if not loop.last %},{% endif %}{% endfor %}' +loose-group_replication_communication_stack=XCOM + +# Group replication behavior +loose-group_replication_start_on_boot=OFF +loose-group_replication_bootstrap_group=OFF +loose-group_replication_single_primary_mode=ON +loose-group_replication_enforce_update_everywhere_checks=OFF + +# Recovery settings +loose-group_replication_recovery_get_public_key=ON +loose-group_replication_recovery_retry_count=10 +loose-group_replication_recovery_reconnect_interval=60 + +# Crash-safe replication settings +relay-log={{ container_prefix }}{{ item }}-relay-bin +relay_log_recovery=ON +relay_log_purge=ON + +# Performance and connection settings +max_connections=1000 +innodb_buffer_pool_size=256M diff --git a/pmm_qa/percona_server_for_mysql/data/my.cnf.j2 b/pmm_qa/percona_server_for_mysql/data/my.cnf.j2 new file mode 100644 index 00000000..d1f8bde9 --- /dev/null +++ b/pmm_qa/percona_server_for_mysql/data/my.cnf.j2 @@ -0,0 +1,3 @@ +[mysqld] +plugin-load-add=auth_native_password.so +userstat=1 \ No newline at end of file diff --git a/pmm_qa/percona_server_for_mysql/percona-server-setup.yml b/pmm_qa/percona_server_for_mysql/percona-server-setup.yml new file mode 100644 index 00000000..af7fa83c --- /dev/null +++ b/pmm_qa/percona_server_for_mysql/percona-server-setup.yml @@ -0,0 +1,173 @@ +--- + +- name: Setup Percona Server for MySQL single, cluster with group Replication in Docker + hosts: localhost + connection: local + gather_facts: yes + vars: + ps_version: "{{ (lookup('env', 'PS_VERSION') | default('8.0', true)) | replace('.', '_') }}" + replication_user: "repl_user" + replication_password: "GRgrO9301RuF" + root_password: "GRgrO9301RuF" + mysql_port: 33066 + mysql_listen_port: 3306 + group_seeds_port: 34061 + nodes_count: "{{ (lookup('env', 'NODES_COUNT') | default('1', true)) | int }}" + network_name: "pmm-qa" + data_dir: "{{ lookup('env', 'HOME') }}/mysql_cluster_data" + server_id_start: 1 + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + query_source: "{{ lookup('env', 'QUERY_SOURCE') | default('perfschema', true) }}" + metrics_mode: "{{ lookup('env', 'metrics_mode') }}" + setup_type: "{{ lookup('env', 'SETUP_TYPE') }}" + random_service_name_value: "" + my_rocks: "{{ lookup('env', 'MY_ROCKS') | default(false, true) }}" + container_prefix: "ps_pmm{{ (setup_type|default('')) and '_' ~ setup_type }}_{{ ps_version }}_" + + tasks: + - name: Modify the node count for group 
replication
+      set_fact:
+        nodes_count: 3
+      when: nodes_count | int < 3 and setup_type == "gr"
+
+    - name: Change nodes count to the required minimum for async replication
+      set_fact:
+        nodes_count: 2
+      when: nodes_count | int < 2 and setup_type == "replication"
+
+    - name: Create Docker network
+      shell: docker network create {{ network_name }}
+      ignore_errors: true
+
+    - name: Remove old data folders
+      shell: 'rm -fr {{ data_dir }}'
+
+    - name: Create data directories
+      file:
+        path: "{{ data_dir }}/node{{ item }}/data"
+        state: directory
+        mode: '0755'
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+    - name: Recursively change ownership of a directory
+      shell: "sudo chown -R 1001:1001 {{ data_dir }}/node{{ item }}/data"
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+    - name: Setup Percona Server group replication
+      include_tasks: ./tasks/percona-server-group-replication-setup.yml
+      when: setup_type == "gr"
+
+    - name: Setup Percona Server with async replication
+      include_tasks: ./tasks/percona-server-async-replication-setup.yml
+      when: setup_type == "replication"
+
+    - name: Setup Percona Server
+      include_tasks: tasks/percona-server-setup-single.yml
+      when: setup_type != "gr" and setup_type != "replication"
+
+    - name: Wait 10 seconds for setup to finish
+      pause:
+        seconds: 10
+
+    - name: Create slowlog configuration for MySQL nodes
+      shell: |
+        docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "SET GLOBAL slow_query_log='ON'; SET GLOBAL long_query_time=0;"
+        docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "SET GLOBAL log_slow_admin_statements=ON; SET GLOBAL log_slow_slave_statements=ON;"
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+      when: query_source == "slowlog"
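+
+    # A quick manual spot-check that the slow log settings took effect
+    # (container name assumes a single-node 8.0 setup; adjust to your prefix):
+    #   docker exec ps_pmm_8_0_1 mysql -uroot -pGRgrO9301RuF \
+    #     -e "SELECT @@slow_query_log, @@long_query_time;"
+
+    - name: Install and add pmm client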
+ include_tasks: ../tasks/install_pmm_client.yml + vars: + container_name: "{{ container_prefix }}{{ item }}" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Generate random service name suffix + set_fact: + random_service_name_value: "_{{ 99999 | random + 1 }}" + + - name: Add service to pmm server + shell: docker exec {{ container_prefix }}{{ item }} pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --environment=ps-gr-dev --cluster=ps-gr-dev-cluster --replication-set=ps-gr-replication {{ container_prefix }}{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:3306 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type == "gr" + + - name: Add service to pmm server + shell: docker exec {{ container_prefix }}{{ item }} pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --environment=ps-replication-dev --cluster=ps-replication-dev-cluster --replication-set=ps-async-replication {{ container_prefix }}{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:3306 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type == "replication" + + - name: Add service to pmm server + shell: docker exec {{ container_prefix }}{{ item }} pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --cluster=ps-single-dev-cluster --environment=ps-dev {{ container_prefix }}{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:3306 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type != "gr" and setup_type != "replication" + + - name: Install sysbench inside of all percona server nodes + shell: docker exec {{ container_prefix }}{{ item }} apt-get install -y sysbench + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Prepare sysbench inside of all percona server nodes + shell: docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "SET GLOBAL super_read_only = OFF; SET GLOBAL read_only = OFF;" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Prepare sysbench inside of all percona server nodes + shell: | + docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "CREATE DATABASE sbtest; CREATE USER 'sbtest'@'localhost' IDENTIFIED BY 'password';" + docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'localhost'; CREATE USER 'sbtest'@'127.0.0.1' IDENTIFIED BY 'password';" + docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'127.0.0.1'; FLUSH PRIVILEGES;" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type != "gr" and setup_type != "replication" #and ps_version | replace('_', '') | int >= 84 + + - name: Prepare sysbench inside of primary percona server node + shell: | + docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "CREATE DATABASE sbtest; CREATE USER 'sbtest'@'localhost' IDENTIFIED BY 'password';" + docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'localhost'; CREATE USER 'sbtest'@'127.0.0.1' IDENTIFIED BY 'password';" + docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'127.0.0.1'; FLUSH PRIVILEGES;" + when: setup_type == "gr" or setup_type == "replication" # and ps_version | 
replace('_', '') | int >= 84
+
+    - name: Prepare data for sysbench inside of all percona server nodes
+      shell: |
+        docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 prepare
+      when: setup_type != "gr" and setup_type != "replication"
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+    - name: Prepare data for sysbench inside of the first percona server node
+      shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 prepare
+      when: setup_type == "gr" or setup_type == "replication"
+
+    - name: Run load for sysbench inside of all percona server nodes
+      shell: docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --threads=16 --time=60 run
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+      when: setup_type != "gr" and setup_type != "replication"
+
+    - name: Run load for sysbench inside of primary percona server node
+      shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --threads=16 --time=60 run
+      when: setup_type == "gr" or setup_type == "replication"
+
+    - name: Copy a load file into the container
+      shell: docker cp ./data/ps_load.sql {{ container_prefix }}{{ item }}:/ps_load.sql
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+    - name: Wait 10 seconds for node to be connected
+      pause:
+        seconds: 10
+
+    - name: Run load inside of first percona server node
+      shell: |
+        docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "CREATE DATABASE school;"
+        docker exec {{ container_prefix }}1 sh -c "mysql -uroot -p{{ root_password }} school < /ps_load.sql"
+      when: setup_type in ['gr', 'replication'] and (ps_version | replace('_','') | int) >= 80
+
+    - name: Run load inside of all percona server nodes
+      shell: |
+        docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "CREATE DATABASE school;"
+        docker exec {{ container_prefix }}{{ item }} sh -c "mysql -uroot -p{{ root_password }} school < /ps_load.sql"
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+      when: setup_type not in ['gr', 'replication'] and (ps_version | replace('_','') | int) >= 80
+
+    - name: Enable MySQL MyRocks
+      shell: docker exec {{ container_prefix }}{{ item }} ps-admin --enable-rocksdb -u root -p{{ root_password }}
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+      when: my_rocks | bool
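+
+    # Manual verification (container name assumes a single-node 8.0 setup)
+    # that the RocksDB engine registered after ps-admin ran:
+    #   docker exec ps_pmm_8_0_1 mysql -uroot -pGRgrO9301RuF -e "SHOW ENGINES;" | grep -i rocksdb
\ No newline at end of file
diff --git a/pmm_qa/percona_server_for_mysql/tasks/percona-server-async-replication-setup.yml b/pmm_qa/percona_server_for_mysql/tasks/percona-server-async-replication-setup.yml
new file mode 100644
index 00000000..e07d3215
--- /dev/null
+++ b/pmm_qa/percona_server_for_mysql/tasks/percona-server-async-replication-setup.yml
@@ -0,0 +1,81 @@
+- name: Generate my.cnf for each node
+  template:
+    src: ./data/my-async-replication{{ '-57' if ps_version | replace('_', '') == '57' else '' }}.cnf.j2
+    dest: "{{ data_dir }}/node{{ item }}/my.cnf"
+  loop: "{{ 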
range(1, nodes_count | int + 1) | list }}"
+
+- name: Prepare docker container and install Percona Server for MySQL
+  include_tasks: ./tasks/prepare_install_ps.yml
+
+- name: Reset configuration for all nodes
+  shell: docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "RESET BINARY LOGS AND GTIDS; RESET REPLICA ALL;"
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+  ignore_errors: yes
+
+- name: Configure replica servers (container2-containerN) for MySQL 8.0 and above
+  shell: >
+    docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "
+    CHANGE REPLICATION SOURCE TO
+    SOURCE_HOST='{{ container_prefix }}1',
+    SOURCE_PORT={{ mysql_listen_port }},
+    SOURCE_USER='{{ replication_user }}',
+    SOURCE_PASSWORD='{{ replication_password }}',
+    SOURCE_AUTO_POSITION=1,
+    SOURCE_PUBLIC_KEY_PATH='',
+    GET_SOURCE_PUBLIC_KEY=1;
+    START REPLICA;
+    "
+  loop: "{{ range(2, nodes_count | int + 1) | list }}"
+  when: ps_version | replace('_', '') | int >= 80
+
+- name: Configure replica servers (container2-containerN) for MySQL 5.7
+  shell: >
+    docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "
+    CHANGE MASTER TO
+    MASTER_HOST='{{ container_prefix }}1',
+    MASTER_PORT={{ mysql_listen_port }},
+    MASTER_USER='{{ replication_user }}',
+    MASTER_PASSWORD='{{ replication_password }}',
+    MASTER_AUTO_POSITION=1;
+    START SLAVE;
+    "
+  loop: "{{ range(2, nodes_count | int + 1) | list }}"
+  when: ps_version | replace('_', '') | int < 80
+
+- name: Create and seed a test database on primary
+  shell: >
+    docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "
+    CREATE DATABASE testdb;
+    USE testdb;
+    CREATE TABLE testdb (id INT PRIMARY KEY, data VARCHAR(100));
+    INSERT INTO testdb VALUES (1, 'Initial data from node mysql1');
+    "
+
+- name: Check replication status on replica nodes
+  shell: docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "SHOW REPLICA STATUS\G"
+  loop: "{{ range(2, nodes_count | int + 1) | list }}"
+  changed_when: false
+  when: ps_version | replace('_', '') | int >= 80
+
+- name: Check replication status on replica nodes
+  shell: docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "SHOW SLAVE STATUS\G"
+  loop: "{{ range(2, nodes_count | int + 1) | list }}"
+  changed_when: false
+  when: ps_version | replace('_', '') | int < 80
+
+- name: Set verification instructions
+  set_fact:
+    verification_msg: |
+      MySQL Cluster setup complete with asynchronous replication!
+      To verify replication is working:
+      1. Connect to the primary ({{ container_prefix }}1):
+         docker exec -it {{ container_prefix }}1 mysql -uroot -p{{ root_password }}
+      2. Insert data in the test database:
+         USE testdb;
+         INSERT INTO testdb VALUES (100, 'Test replication');
+      3. 
Connect to replicas and verify data is replicated:
+         docker exec -it {{ container_prefix }}2 mysql -uroot -p{{ root_password }}
+         USE testdb;
+         SELECT * FROM testdb;
+
+- name: Display verification instructions
+  debug:
+    msg: "{{ verification_msg | split('\n') }}"
\ No newline at end of file
diff --git a/pmm_qa/percona_server_for_mysql/tasks/percona-server-group-replication-setup.yml b/pmm_qa/percona_server_for_mysql/tasks/percona-server-group-replication-setup.yml
new file mode 100644
index 00000000..f229f97e
--- /dev/null
+++ b/pmm_qa/percona_server_for_mysql/tasks/percona-server-group-replication-setup.yml
@@ -0,0 +1,120 @@
+- name: Generate my.cnf for each node
+  template:
+    src: ./data/my-group-replication{{ '-57' if ps_version | replace('_', '') == '57' else '' }}.cnf.j2
+    dest: "{{ data_dir }}/node{{ item }}/my.cnf"
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+- name: Prepare docker container and install Percona Server for MySQL
+  include_tasks: ./tasks/prepare_install_ps.yml
+
+- name: Reset configuration for all nodes for MySQL 8.4
+  shell: >
+    docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "RESET BINARY LOGS AND GTIDS; RESET REPLICA ALL; SET GLOBAL gtid_purged='';"
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+  when: ps_version | replace('_', '') | int >= 84
+
+- name: Reset configuration for all nodes for MySQL 8.0 and below
+  shell: >
+    docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "RESET MASTER; RESET SLAVE ALL;"
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+  when: ps_version | replace('_', '') | int < 84
+
+- name: Init configuration for group replication (single exec per node, no binlog) for MySQL 8.0+
+  shell: |
+    docker exec -i {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} <<'EOSQL'
+    SET SQL_LOG_BIN=0;
+    CREATE USER IF NOT EXISTS '{{ replication_user }}'@'%' IDENTIFIED BY '{{ replication_password }}';
+    GRANT REPLICATION SLAVE ON *.* TO '{{ replication_user }}'@'%';
+    GRANT CONNECTION_ADMIN ON *.* TO '{{ replication_user }}'@'%';
+    GRANT BACKUP_ADMIN ON *.* TO '{{ replication_user }}'@'%';
+    GRANT GROUP_REPLICATION_STREAM ON *.* TO '{{ replication_user }}'@'%';
+    GRANT SERVICE_CONNECTION_ADMIN ON *.* TO '{{ replication_user }}'@'%';
+    GRANT SYSTEM_VARIABLES_ADMIN ON *.* TO '{{ replication_user }}'@'%';
+    FLUSH PRIVILEGES;
+    -- Configure recovery channel credentials (host and port are resolved from group metadata)
+    CHANGE REPLICATION SOURCE TO
+    SOURCE_USER='{{ replication_user }}',
+    SOURCE_PASSWORD='{{ replication_password }}'
+    FOR CHANNEL 'group_replication_recovery';
+    SET SQL_LOG_BIN=1;
+    EOSQL
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+  when: ps_version | replace('_', '') | int >= 80
+
+- name: Init configuration for group replication (single exec per node, no binlog) for MySQL 5.7
+  shell: |
+    docker exec -i {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} <<'EOSQL'
+    SET SQL_LOG_BIN=0;
+    CREATE USER IF NOT EXISTS '{{ replication_user }}'@'%' IDENTIFIED BY '{{ replication_password }}';
+    GRANT REPLICATION SLAVE ON *.* TO '{{ replication_user }}'@'%';
+    FLUSH PRIVILEGES;
+    -- Configure recovery channel credentials (host and port are resolved from group metadata)
+    CHANGE MASTER TO
+    MASTER_USER='{{ replication_user }}',
+    MASTER_PASSWORD='{{ replication_password }}'
+    FOR CHANNEL 'group_replication_recovery';
+    SET SQL_LOG_BIN=1;
+    EOSQL
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+  when: ps_version | replace('_', '') | int < 80
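+
+# Before bootstrapping, the stored recovery-channel credentials can be
+# spot-checked manually (node name assumes a 3-node 8.0 GR setup):
+#   docker exec ps_pmm_gr_8_0_1 mysql -uroot -pGRgrO9301RuF \
+#     -e "SHOW REPLICA STATUS FOR CHANNEL 'group_replication_recovery'\G"
+
+- name: Bootstrap first node 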
(primary) for MySQL 8.4/8.0 + shell: | + docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "SET GLOBAL group_replication_bootstrap_group=ON;" + docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "START GROUP_REPLICATION;" + docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "SET GLOBAL group_replication_bootstrap_group=OFF;" +- name: Wait for bootstrap to complete + pause: + seconds: 10 + +- name: Start group replication on other nodes + shell: | + docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "START GROUP_REPLICATION;" + loop: "{{ range(2, nodes_count | int + 1) | list }}" + +- name: Wait 10 seconds for the other nodes to join + pause: + seconds: 10 + +- name: Create and seed a test database on primary + shell: > + docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e " + CREATE DATABASE testdb; + USE testdb; + CREATE TABLE testdb (id INT PRIMARY KEY, data VARCHAR(100)); + INSERT INTO testdb VALUES (1, 'Initial data from node mysql1'); + " +- name: Check replication status on first node + shell: docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "SELECT * FROM performance_schema.replication_group_members;" + register: replication_status + +- name: Display replication status + debug: + var: replication_status.stdout + +- name: Check replication group members count + shell: docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "SELECT COUNT(*) AS count FROM performance_schema.replication_group_members;" + register: member_count + +- name: Display member count + debug: + var: member_count.stdout + +- name: Set verification instructions + set_fact: + verification_msg: | + MySQL Cluster setup complete! + To verify replication is working: + 1. Connect to the first node: + docker exec -it {{ container_prefix }}1 mysql -uroot -p{{ root_password }} + 2. Insert data in the test database: + USE testdb; + INSERT INTO testdb VALUES (100, 'Test replication'); + 3. 
Connect to other nodes and verify data is replicated: + docker exec -it {{ container_prefix }}2 mysql -uroot -p{{ root_password }} + USE testdb; + SELECT * FROM testdb; + +- name: Display verification instructions + debug: + msg: "{{ verification_msg | split('\n') }}" diff --git a/pmm_qa/percona_server_for_mysql/tasks/percona-server-setup-single.yml b/pmm_qa/percona_server_for_mysql/tasks/percona-server-setup-single.yml new file mode 100644 index 00000000..75d4ea58 --- /dev/null +++ b/pmm_qa/percona_server_for_mysql/tasks/percona-server-setup-single.yml @@ -0,0 +1,8 @@ +- name: Generate my.cnf for each node + template: + src: ./data/my.cnf.j2 + dest: "{{ data_dir }}/node{{ item }}/my.cnf" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Prepare docker container and install Percona Server for MySQL + include_tasks: ./tasks/prepare_install_ps.yml diff --git a/pmm_qa/percona_server_for_mysql/tasks/prepare_install_ps.yml b/pmm_qa/percona_server_for_mysql/tasks/prepare_install_ps.yml new file mode 100644 index 00000000..dce072be --- /dev/null +++ b/pmm_qa/percona_server_for_mysql/tasks/prepare_install_ps.yml @@ -0,0 +1,72 @@ +- name: Prepare Container for Percona Server for MySQL 8.0+ + shell: | + docker run --rm -d --name="{{ container_prefix }}{{ item }}" \ + --network="pmm-qa" \ + --privileged --cgroupns=host -v /sys/fs/cgroup:/sys/fs/cgroup:rw \ + -v /var/lib/containerd \ + -v ./ssl:/ssl \ + antmelekhin/docker-systemd:ubuntu-24.04 + when: ps_version | replace('_', '') | int >= 80 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Prepare Container for Percona Server for MySQL 5.7 + shell: | + docker run --rm -d --name="{{ container_prefix }}{{ item }}" \ + --network="pmm-qa" \ + --privileged --cgroupns=host -v /sys/fs/cgroup:/sys/fs/cgroup:rw \ + -v /var/lib/containerd \ + -v ./ssl:/ssl \ + antmelekhin/docker-systemd:ubuntu-22.04 + when: ps_version | replace('_', '') | int < 80 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Install dependencies + shell: | + docker exec {{ container_prefix }}{{ item }} apt-get update + docker exec {{ container_prefix }}{{ item }} apt-get -y install wget gnupg2 lsb-release curl + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Install Percona Release + shell: | + docker exec {{ container_prefix }}{{ item }} wget https://repo.percona.com/apt/percona-release_latest.generic_all.deb + docker exec {{ container_prefix }}{{ item }} dpkg -i ./percona-release_latest.generic_all.deb + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Enable Percona Server for MySQL repository + shell: docker exec {{ container_prefix }}{{ item }} percona-release enable {{ 'ps-84-lts' if ps_version == '8_4' else 'ps-' + ps_version | replace('_', '') }} release + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Install Percona Server for MySQL + shell: | + docker exec {{ container_prefix }}{{ item }} apt install -y \ + percona-server-server{{ '-5.7' if ps_version | replace('_', '') == '57' else '' }} \ + percona-server-rocksdb{{ '-5.7' if ps_version | replace('_', '') == '57' else '' }} \ + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Start Percona Server for MySQL + shell: | + docker exec {{ container_prefix }}{{ item }} systemctl enable mysql + docker exec {{ container_prefix }}{{ item }} systemctl start mysql + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Copy config file to docker container + shell: docker cp {{ data_dir }}/node{{ item }}/my.cnf {{ 
container_prefix }}{{ item }}:/etc/mysql/my.cnf
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+- name: Restart Percona Server for MySQL
+  shell: docker exec {{ container_prefix }}{{ item }} systemctl restart mysql
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+- name: Wait 5 seconds for MySQL to start
+  pause:
+    seconds: 5
+
+- name: Change root password for Percona Server for MySQL 5.7
+  shell: "docker exec {{ container_prefix }}{{ item }} mysql -e \"ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY '{{ root_password }}';\""
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+  when: ps_version|replace('_', '')|int < 80
+
+- name: Change root password for Percona Server for MySQL 8.0+
+  shell: "docker exec {{ container_prefix }}{{ item }} mysql -e \"ALTER USER 'root'@'localhost' IDENTIFIED WITH caching_sha2_password BY '{{ root_password }}';\""
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+  when: ps_version|replace('_', '')|int >= 80
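+
+# A manual sanity check (node name assumes a single-node 8.0 setup) that the
+# new root password and authentication plugin are in place:
+#   docker exec ps_pmm_8_0_1 mysql -uroot -pGRgrO9301RuF \
+#     -e "SELECT user, host, plugin FROM mysql.user WHERE user='root';"
diff --git a/pmm_qa/pmm-framework.py b/pmm_qa/pmm-framework.py
index 7b477332..5084b621 100755
--- a/pmm_qa/pmm-framework.py
+++ b/pmm_qa/pmm-framework.py
@@ -66,43 +66,19 @@ def setup_ps(db_type, db_version=None, db_config=None, args=None):
     # Gather Version details
     ps_version = os.getenv('PS_VERSION') or db_version or database_configs[db_type]["versions"][-1]
     ps_version_int = int(ps_version.replace(".", ""))
-    if ps_version_int >= 84:
-        # Define environment variables for playbook
-        env_vars = {
-            'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1',
-            'SETUP_TYPE': setup_type_value,
-            'NODES_COUNT': get_value('NODES_COUNT', db_type, args, db_config),
-            'QUERY_SOURCE': get_value('QUERY_SOURCE', db_type, args, db_config),
-            'PS_VERSION': ps_version,
-            'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config),
-            'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin',
-            'MY_ROCKS': get_value('MY_ROCKS', db_type, args, db_config),
-        }
-
-        run_ansible_playbook('percona_server/percona-server-setup.yml', env_vars, args)
-    else:
-        # Define environment variables for playbook
-        env_vars = {
-            'GROUP_REPLICATION': setup_type,
-            'PS_NODES': no_of_nodes,
-            'PS_VERSION': ps_version,
-            'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1',
-            'PS_CONTAINER': 'ps_pmm_' + str(ps_version) + (
-                '_replica' if setup_type_value in ("replication", "replica") else ''),
-            'PS_PORT': 3318 if setup_type_value in ("replication", "replica") else 3317,
-            'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config),
-            'QUERY_SOURCE': get_value('QUERY_SOURCE', db_type, args, db_config),
-            'PS_TARBALL': get_value('TARBALL', db_type, args, db_config),
-            'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 'admin',
-            'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3'
-        }
-
-        # Ansible playbook filename
-        playbook_filename = 'ps_pmm_setup.yml'
-
-        # Call the function to run the Ansible playbook
-        run_ansible_playbook(playbook_filename, env_vars, args)
+    # Define environment variables for playbook
+    env_vars = {
+        'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1',
+        'SETUP_TYPE': setup_type_value,
+        'NODES_COUNT': get_value('NODES_COUNT', db_type, args, db_config),
+        'QUERY_SOURCE': get_value('QUERY_SOURCE', db_type, args, db_config),
+        'PS_VERSION': ps_version,
+        'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config),
+        'ADMIN_PASSWORD': os.getenv('ADMIN_PASSWORD') or args.pmm_server_password or 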
'admin', + 'MY_ROCKS': get_value('MY_ROCKS', db_type, args, db_config), + } + run_ansible_playbook('percona_server_for_mysql/percona-server-setup.yml', env_vars, args) def setup_mysql(db_type, db_version=None, db_config=None, args=None): # Check if PMM server is running diff --git a/pmm_qa/scripts/database_options.py b/pmm_qa/scripts/database_options.py index b58373fd..1024ea6c 100644 --- a/pmm_qa/scripts/database_options.py +++ b/pmm_qa/scripts/database_options.py @@ -23,7 +23,7 @@ "TARBALL": ""} }, "MYSQL": { - "versions": ["8.0", "8.4"], + "versions": ["5.7", "8.0", "8.4"], "configurations": {"QUERY_SOURCE": "perfschema", "SETUP_TYPE": "", "CLIENT_VERSION": "3-dev-latest", "TARBALL": ""} }, From 4a5dd66f363c9d3fec1f8b6ee8ac4a600a911ace Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 14:19:26 +0100 Subject: [PATCH 02/39] PMM-13992: Test data --- .../percona_server_for_mysql/data/ps_load.sql | 94 +++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 pmm_qa/percona_server_for_mysql/data/ps_load.sql diff --git a/pmm_qa/percona_server_for_mysql/data/ps_load.sql b/pmm_qa/percona_server_for_mysql/data/ps_load.sql new file mode 100644 index 00000000..8992e400 --- /dev/null +++ b/pmm_qa/percona_server_for_mysql/data/ps_load.sql @@ -0,0 +1,94 @@ +-- ======================================== +-- CREATE TABLES +-- ======================================== + +CREATE TABLE students ( + student_id INT AUTO_INCREMENT PRIMARY KEY, + first_name VARCHAR(50), + last_name VARCHAR(50), + birth_date DATE +); + +CREATE TABLE classes ( + class_id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(100), + teacher VARCHAR(100) +); + +CREATE TABLE enrollments ( + enrollment_id INT AUTO_INCREMENT PRIMARY KEY, + student_id INT, + class_id INT, + enrollment_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (student_id) REFERENCES students(student_id), + FOREIGN KEY (class_id) REFERENCES classes(class_id) +); + +-- ======================================== +-- INSERT INITIAL DATA +-- ======================================== + +INSERT INTO students (first_name, last_name, birth_date) VALUES +('Alice', 'Smith', '2005-04-10'), +('Bob', 'Johnson', '2006-08-15'), +('Charlie', 'Brown', '2004-12-01'); + +INSERT INTO classes (name, teacher) VALUES +('Mathematics', 'Mrs. Taylor'), +('History', 'Mr. Anderson'), +('Science', 'Dr. Reynolds'); + +INSERT INTO enrollments (student_id, class_id) VALUES +(1, 1), +(1, 2), +(2, 2), +(3, 1), +(3, 3); + +-- ======================================== +-- SELECT: View all data after insert +-- ======================================== + +-- View all students +SELECT * FROM students; + +-- View all classes +SELECT * FROM classes; + +-- View all enrollments +SELECT * FROM enrollments; + +-- View students enrolled in Mathematics +SELECT s.first_name, s.last_name +FROM students s +JOIN enrollments e ON s.student_id = e.student_id +JOIN classes c ON e.class_id = c.class_id +WHERE c.name = 'Mathematics'; + +-- Count students per class +SELECT c.name AS class_name, COUNT(e.student_id) AS student_count +FROM classes c +LEFT JOIN enrollments e ON c.class_id = e.class_id +GROUP BY c.name; + +-- ======================================== +-- UPDATE DATA +-- ======================================== + +UPDATE students +SET last_name = 'Williams' +WHERE first_name = 'Bob' AND last_name = 'Johnson'; + +UPDATE classes +SET teacher = 'Ms. 
Carter' +WHERE name = 'History'; + +-- ======================================== +-- DELETE DATA +-- ======================================== + +DELETE FROM enrollments +WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'); + +DELETE FROM students +WHERE first_name = 'Alice' AND last_name = 'Smith'; From 09cfb44bc27aa5eb5cba66b83837298d91e74299 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 14:36:43 +0100 Subject: [PATCH 03/39] PMM-13992: Test data --- .../data/ps_load.sql => data/mysql_load.sql} | 0 pmm_qa/percona_server/percona-server-setup.yml | 2 +- pmm_qa/percona_server_for_mysql/percona-server-setup.yml | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename pmm_qa/{percona_server_for_mysql/data/ps_load.sql => data/mysql_load.sql} (100%) diff --git a/pmm_qa/percona_server_for_mysql/data/ps_load.sql b/pmm_qa/data/mysql_load.sql similarity index 100% rename from pmm_qa/percona_server_for_mysql/data/ps_load.sql rename to pmm_qa/data/mysql_load.sql diff --git a/pmm_qa/percona_server/percona-server-setup.yml b/pmm_qa/percona_server/percona-server-setup.yml index 34bd2658..e4e00a75 100644 --- a/pmm_qa/percona_server/percona-server-setup.yml +++ b/pmm_qa/percona_server/percona-server-setup.yml @@ -229,7 +229,7 @@ - name: Copy a load file into the container community.docker.docker_container_copy_into: container: "ps_pmm_{{ ps_version }}_{{ item }}" - path: ./data/ps_load.sql + path: ../data/mysql_load.sql container_path: /ps_load.sql loop: "{{ range(1, nodes_count | int + 1) | list }}" diff --git a/pmm_qa/percona_server_for_mysql/percona-server-setup.yml b/pmm_qa/percona_server_for_mysql/percona-server-setup.yml index af7fa83c..cec920c8 100644 --- a/pmm_qa/percona_server_for_mysql/percona-server-setup.yml +++ b/pmm_qa/percona_server_for_mysql/percona-server-setup.yml @@ -147,7 +147,7 @@ when: setup_type == "gr" and setup_type == "replication" - name: Copy a load file into the container - shell: docker cp ./data/ps_load.sql {{ container_prefix }}{{ item }}:/ps_load.sql + shell: docker cp ../data/mysql_load.sql {{ container_prefix }}{{ item }}:/ps_load.sql loop: "{{ range(1, nodes_count | int + 1) | list }}" - name: Wait 10 seconds for node to be connected From 8704573e2ba77bf7c1156577977f1b0a97af7baf Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 15:23:27 +0100 Subject: [PATCH 04/39] PMM-13992: Add support for mysql --- .../mysql/data/my-async-replication-57.cnf.j2 | 38 ++++ pmm_qa/mysql/data/my-async-replication.cnf.j2 | 42 +++++ .../mysql/data/my-group-replication-57.cnf.j2 | 49 +++++ pmm_qa/mysql/data/my-group-replication.cnf.j2 | 52 ++++++ pmm_qa/mysql/data/my.cnf.j2 | 2 + pmm_qa/mysql/mysql-setup.yml | 173 ++++++++++++++++++ .../tasks/mysql-async-replication-setup.yml | 86 +++++++++ .../tasks/mysql-group-replication-setup.yml | 123 +++++++++++++ pmm_qa/mysql/tasks/mysql-single.yml | 11 ++ pmm_qa/mysql/tasks/prepare_install_mysql.yml | 123 +++++++++++++ pmm_qa/pmm-framework.py | 8 +- 11 files changed, 701 insertions(+), 6 deletions(-) create mode 100644 pmm_qa/mysql/data/my-async-replication-57.cnf.j2 create mode 100644 pmm_qa/mysql/data/my-async-replication.cnf.j2 create mode 100644 pmm_qa/mysql/data/my-group-replication-57.cnf.j2 create mode 100644 pmm_qa/mysql/data/my-group-replication.cnf.j2 create mode 100644 pmm_qa/mysql/data/my.cnf.j2 create mode 100644 pmm_qa/mysql/mysql-setup.yml create mode 100644 pmm_qa/mysql/tasks/mysql-async-replication-setup.yml create mode 100644 
pmm_qa/mysql/tasks/mysql-group-replication-setup.yml create mode 100644 pmm_qa/mysql/tasks/mysql-single.yml create mode 100644 pmm_qa/mysql/tasks/prepare_install_mysql.yml diff --git a/pmm_qa/mysql/data/my-async-replication-57.cnf.j2 b/pmm_qa/mysql/data/my-async-replication-57.cnf.j2 new file mode 100644 index 00000000..b2f96dc5 --- /dev/null +++ b/pmm_qa/mysql/data/my-async-replication-57.cnf.j2 @@ -0,0 +1,38 @@ +[mysqld] +# General server configuration +server_id={{ item }} +bind-address=0.0.0.0 +port={{ mysql_listen_port }} +userstat=1 + +# Replication settings +gtid_mode=ON +enforce_gtid_consistency=ON +log_bin=binlog +log_slave_updates=ON +sync_binlog=1 +binlog_checksum=NONE +# Only disable engines supported in 5.7 and safe for GTID +disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE" +# MacOS-specific, where table names are case-sensitive +lower_case_table_names=2 + +# Optional: report_host is valid in 5.7 +report_host={{ container_prefix }}{{ item }} + +# Replica configuration - applies to all nodes except primary (they'll be able to become replicas) +{% if item != 1 %} +# Replica specific settings (use slave_parallel_* in 5.7) +slave_parallel_workers=4 +slave_parallel_type=LOGICAL_CLOCK +slave_preserve_commit_order=1 +{% endif %} + +# Crash-safe replication settings +relay-log={{ container_prefix }}{{ item }}-relay-bin +relay_log_recovery=ON +relay_log_purge=ON + +# Performance and connection settings +max_connections=1000 +innodb_buffer_pool_size=256M diff --git a/pmm_qa/mysql/data/my-async-replication.cnf.j2 b/pmm_qa/mysql/data/my-async-replication.cnf.j2 new file mode 100644 index 00000000..efce584c --- /dev/null +++ b/pmm_qa/mysql/data/my-async-replication.cnf.j2 @@ -0,0 +1,42 @@ +[mysqld] +# General server configuration +server_id={{ item }} +bind-address=0.0.0.0 +port={{ mysql_listen_port }} + +# Authentication settings for caching_sha2_password +caching_sha2_password_auto_generate_rsa_keys=ON +# The following two parameters tell MySQL where to store the RSA key pair +caching_sha2_password_private_key_path=private_key.pem +caching_sha2_password_public_key_path=public_key.pem + +# Replication settings +gtid_mode=ON +enforce_gtid_consistency=ON +log_bin=binlog +log_replica_updates=ON +sync_binlog=1 +binlog_checksum=NONE +disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY" +# MacOS-specific, where table names are case-sensitive +lower_case_table_names=2 + +# MySQL 8.4 compatibility settings +report_host={{ container_prefix }}{{ item }} + +# Replica configuration - applies to all nodes except primary (they'll be able to become replicas) +{% if item != 1 %} +# Replica specific settings +replica_parallel_workers=4 +replica_parallel_type=LOGICAL_CLOCK +replica_preserve_commit_order=1 +{% endif %} + +# Crash-safe replication settings +relay-log={{ container_prefix }}{{ item }}-relay-bin +relay_log_recovery=ON +relay_log_purge=ON + +# Performance and connection settings +max_connections=1000 +innodb_buffer_pool_size=256M diff --git a/pmm_qa/mysql/data/my-group-replication-57.cnf.j2 b/pmm_qa/mysql/data/my-group-replication-57.cnf.j2 new file mode 100644 index 00000000..74383253 --- /dev/null +++ b/pmm_qa/mysql/data/my-group-replication-57.cnf.j2 @@ -0,0 +1,49 @@ +[mysqld] +# General server configuration +server_id={{ server_id_start + item - 1 }} +binlog_format=ROW +bind-address=0.0.0.0 +port={{ mysql_listen_port }} +userstat=1 + +# 5.7 General replication settings +gtid_mode=ON +enforce_gtid_consistency=ON +master_info_repository=TABLE 
+relay_log_info_repository=TABLE +transaction_write_set_extraction=XXHASH64 +binlog_checksum=NONE +log_bin=binlog +log_slave_updates=ON +# NO: disabled_storage_engines in 5.7 +lower_case_table_names=2 + +# Report host for replication/monitoring +report_host={{ container_prefix }}{{ item }} + +# Group Replication Settings +# (Available in MySQL 5.7.17+ and must be installed as plugin) +plugin_load_add='group_replication.so' +loose-group_replication_group_name='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' +loose-group_replication_local_address='{{ container_prefix }}{{ item }}:{{ group_seeds_port }}' +loose-group_replication_group_seeds={% for i in range(1, nodes_count | int + 1) %}{{ container_prefix }}{{ i }}:{{ group_seeds_port }}{% if not loop.last %},{% endif %}{% endfor %} +loose-group_replication_communication_stack=XCOM + +# Group replication behavior +loose-group_replication_start_on_boot=OFF +loose-group_replication_bootstrap_group=OFF +loose-group_replication_single_primary_mode=ON +loose-group_replication_enforce_update_everywhere_checks=OFF + +# Recovery settings +loose-group_replication_recovery_retry_count=10 +loose-group_replication_recovery_reconnect_interval=60 + +# Crash-safe replication settings +relay-log={{ container_prefix }}{{ item }}-relay-bin +relay_log_recovery=ON +relay_log_purge=ON + +# Performance and connection settings +max_connections=1000 +innodb_buffer_pool_size=256M diff --git a/pmm_qa/mysql/data/my-group-replication.cnf.j2 b/pmm_qa/mysql/data/my-group-replication.cnf.j2 new file mode 100644 index 00000000..473e80e3 --- /dev/null +++ b/pmm_qa/mysql/data/my-group-replication.cnf.j2 @@ -0,0 +1,52 @@ +[mysqld] +# General server configuration +server_id={{ server_id_start + item - 1 }} +bind-address=0.0.0.0 +port={{ mysql_listen_port }} + +# General replication settings +gtid_mode=ON +enforce_gtid_consistency=ON +binlog_checksum=NONE +binlog_format=ROW +transaction_write_set_extraction=XXHASH64 +log_bin=binlog +log_replica_updates=ON +disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY" +lower_case_table_names=2 + +# MySQL 8.4 compatibility settings +report_host={{ container_prefix }}{{ item }} + +# Group Replication Settings +plugin_load_add='group_replication.so' +loose-group_replication_group_name='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' +loose-group_replication_local_address='{{ container_prefix }}{{ item }}:{{ group_seeds_port }}' +loose-group_replication_group_seeds='{% for i in range(1, nodes_count | int + 1) %}{{ container_prefix }}{{ i }}:{{ group_seeds_port }}{% if not loop.last %},{% endif %}{% endfor %}' +loose-group_replication_communication_stack=XCOM + +# Group replication behavior +loose-group_replication_start_on_boot=OFF +loose-group_replication_bootstrap_group=OFF +loose-group_replication_single_primary_mode=ON +loose-group_replication_enforce_update_everywhere_checks=OFF + +# Recovery settings +loose-group_replication_recovery_use_ssl=ON # Add this for secure recovery +loose-group_replication_recovery_get_public_key=ON +loose-group_replication_recovery_retry_count=10 +loose-group_replication_recovery_reconnect_interval=60 + +# Crash-safe replication settings +relay-log={{ container_prefix }}{{ item }}-relay-bin +relay_log_recovery=ON +relay_log_purge=ON + +# Performance and connection settings +max_connections=1000 +innodb_buffer_pool_size=256M + +# Additional recommended settings +loose-group_replication_compression_threshold=1000000 # Compress messages > 1MB +loose-group_replication_member_expel_timeout=5 # Seconds before expelling 
unresponsive member +loose-group_replication_autorejoin_tries=3 # Auto-rejoin attempts after expulsion diff --git a/pmm_qa/mysql/data/my.cnf.j2 b/pmm_qa/mysql/data/my.cnf.j2 new file mode 100644 index 00000000..6aa308b3 --- /dev/null +++ b/pmm_qa/mysql/data/my.cnf.j2 @@ -0,0 +1,2 @@ +[mysqld] +plugin-load-add=auth_native_password.so diff --git a/pmm_qa/mysql/mysql-setup.yml b/pmm_qa/mysql/mysql-setup.yml new file mode 100644 index 00000000..c9a51069 --- /dev/null +++ b/pmm_qa/mysql/mysql-setup.yml @@ -0,0 +1,173 @@ +--- + +- name: MySQL single, cluster with group Replication in Docker + hosts: localhost + connection: local + gather_facts: yes + vars: + mysql_version: "{{ (lookup('env', 'MS_VERSION') | default('8.0', true)) | replace('.', '_') }}" + replication_user: "repl_user" + replication_password: "GRgrO9301RuF" + root_password: "GRgrO9301RuF" + mysql_port: 33066 + mysql_listen_port: 3306 + group_seeds_port: 34061 + nodes_count: "{{ (lookup('env', 'NODES_COUNT') | default('1', true)) | int }}" + network_name: "pmm-qa" + data_dir: "{{ lookup('env', 'HOME') }}/mysql_cluster_data" + server_id_start: 1 + pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" + client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" + admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" + query_source: "{{ lookup('env', 'QUERY_SOURCE') | default('perfschema', true) }}" + metrics_mode: "{{ lookup('env', 'metrics_mode') }}" + setup_type: "{{ lookup('env', 'SETUP_TYPE') }}" + random_service_name_value: "" + my_rocks: "{{ lookup('env', 'MY_ROCKS') | default(false, true) }}" + container_prefix: "mysql_pmm{{ (setup_type|default('')) and '_' ~ setup_type }}_{{ mysql_version }}_" + + tasks: +# - name: Fail if setup_type is gr or replication and version is less than 80 +# fail: +# msg: "This setup_type ({{ setup_type }}) with version {{ mysql_version | replace('_', '.') }} is not supported!" 
+#    when: (setup_type == 'gr' or setup_type == 'replication') and (mysql_version | replace('_', '') | int < 80)
+
+    - name: Modify the node count for group replication
+      set_fact:
+        nodes_count: 3
+      when: nodes_count | int < 3 and setup_type == "gr"
+
+    - name: Change nodes count to the required minimum for async replication
+      set_fact:
+        nodes_count: 2
+      when: nodes_count | int < 2 and setup_type == "replication"
+
+    - name: Create Docker network
+      shell: docker network create {{ network_name }}
+      ignore_errors: true
+
+    - name: Remove old data folders
+      shell: 'rm -fr {{ data_dir }}'
+
+    - name: Create data directories
+      file:
+        path: "{{ data_dir }}/node{{ item }}/data"
+        state: directory
+        mode: '0755'
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+    - name: Recursively change ownership of a directory
+      shell: "sudo chown -R 1001:1001 {{ data_dir }}/node{{ item }}/data"
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+    - name: Setup MySQL group replication
+      include_tasks: ./tasks/mysql-group-replication-setup.yml
+      when: setup_type == "gr"
+
+    - name: Setup MySQL with async replication
+      include_tasks: ./tasks/mysql-async-replication-setup.yml
+      when: setup_type == "replication"
+
+    - name: Setup MySQL
+      include_tasks: tasks/mysql-single.yml
+      when: setup_type != "gr" and setup_type != "replication"
+
+    - name: Wait 10 seconds for setup to finish
+      pause:
+        seconds: 10
+
+    - name: Create slowlog configuration for MySQL nodes
+      shell: |
+        docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "SET GLOBAL slow_query_log='ON'; SET GLOBAL long_query_time=0;"
+        docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "SET GLOBAL log_slow_admin_statements=ON; SET GLOBAL log_slow_slave_statements=ON;"
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+      when: query_source == "slowlog"
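+
+    # Once the client below is installed and the service added, a node's
+    # agents can be listed manually (name assumes a single-node 8.0 setup):
+    #   docker exec mysql_pmm_8_0_1 pmm-admin list
+
+    - name: Install and add pmm client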
+ include_tasks: ../tasks/install_pmm_client.yml + vars: + container_name: "{{ container_prefix }}{{ item }}" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Generate random service name suffix + set_fact: + random_service_name_value: "_{{ 99999 | random + 1 }}" + + - name: Add service to pmm server + shell: docker exec {{ container_prefix }}{{ item }} pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --environment=mysql-gr-dev --cluster=mysql-gr-dev-cluster --replication-set=mysql-gr-replication {{ container_prefix }}{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:3306 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type == "gr" + + - name: Add service to pmm server + shell: docker exec {{ container_prefix }}{{ item }} pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --environment=mysql-replication-dev --cluster=mysql-replication-dev-cluster --replication-set=mysql-async-replication {{ container_prefix }}{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:3306 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type == "replication" + + - name: Add service to pmm server + shell: docker exec {{ container_prefix }}{{ item }} pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --cluster=mysql-single-dev-cluster --environment=mysql-dev {{ container_prefix }}{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:3306 + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type != "gr" and setup_type != "replication" + + - name: Install sysbench inside of all mysql nodes + shell: docker exec {{ container_prefix }}{{ item }} apt-get install -y sysbench + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Prepare sysbench inside of all mysql nodes + shell: docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "SET GLOBAL super_read_only = OFF; SET GLOBAL read_only = OFF;" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + + - name: Prepare sysbench inside of all mysql nodes + shell: | + docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "CREATE DATABASE sbtest; CREATE USER 'sbtest'@'localhost' IDENTIFIED BY 'password';" + docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'localhost'; CREATE USER 'sbtest'@'127.0.0.1' IDENTIFIED BY 'password';" + docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'127.0.0.1'; FLUSH PRIVILEGES;" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + when: setup_type != "gr" and setup_type != "replication" #and mysql_version | replace('_', '') | int >= 84 + + - name: Prepare sysbench inside of primary mysql node + shell: | + docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "CREATE DATABASE sbtest; CREATE USER 'sbtest'@'localhost' IDENTIFIED BY 'password';" + docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'localhost'; CREATE USER 'sbtest'@'127.0.0.1' IDENTIFIED BY 'password';" + docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'127.0.0.1'; FLUSH PRIVILEGES;" + when: setup_type == "gr" or setup_type == "replication" # and mysql_version | 
replace('_', '') | int >= 84
+
+    - name: Prepare data for sysbench inside of all mysql nodes
+      shell: |
+        docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 prepare
+      when: setup_type != "gr" and setup_type != "replication"
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+    - name: Prepare data for sysbench inside of first mysql node
+      shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 prepare
+      when: setup_type == "gr" or setup_type == "replication"
+
+    - name: Run load for sysbench inside of all mysql nodes
+      shell: docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --threads=16 --time=60 run
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+      when: setup_type != "gr" and setup_type != "replication"
+
+    - name: Run load for sysbench inside of primary mysql node
+      shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --threads=16 --time=60 run
+      when: setup_type == "gr" or setup_type == "replication"
+
+    - name: Copy a load file into the container
+      shell: docker cp ../data/mysql_load.sql {{ container_prefix }}{{ item }}:/mysql_load.sql
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+    - name: Wait 10 seconds for node to be connected
+      pause:
+        seconds: 10
+
+    - name: Run load inside of first mysql node
+      shell: |
+        docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "CREATE DATABASE school;"
+        docker exec {{ container_prefix }}1 sh -c "mysql -uroot -p{{ root_password }} school < /mysql_load.sql"
+      when: setup_type in ['gr', 'replication'] and (mysql_version | replace('_','') | int) >= 80
+
+    - name: Run load inside of all mysql nodes
+      shell: |
+        docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "CREATE DATABASE school;"
+        docker exec {{ container_prefix }}{{ item }} sh -c "mysql -uroot -p{{ root_password }} school < /mysql_load.sql"
+      loop: "{{ range(1, nodes_count | int + 1) | list }}"
+      when: setup_type not in ['gr', 'replication'] and (mysql_version | replace('_','') | int) >= 80
\ No newline at end of file
diff --git a/pmm_qa/mysql/tasks/mysql-async-replication-setup.yml b/pmm_qa/mysql/tasks/mysql-async-replication-setup.yml
new file mode 100644
index 00000000..908bb68a
--- /dev/null
+++ b/pmm_qa/mysql/tasks/mysql-async-replication-setup.yml
@@ -0,0 +1,86 @@
+- name: Generate my.cnf for each node
+  template:
+    src: ./data/my-async-replication{{ '-57' if mysql_version | replace('_', '') == '57' else '' }}.cnf.j2
+    dest: "{{ data_dir }}/node{{ item }}/my.cnf"
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+- name: Prepare docker container and install MySQL
+  include_tasks: ./tasks/prepare_install_mysql.yml
+  vars:
+    index: "{{ item }}"
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
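+
+# Note: MySQL 8.4 removed the old RESET MASTER / RESET SLAVE statements;
+# RESET BINARY LOGS AND GTIDS and RESET REPLICA (available since 8.0.22)
+# are their replacements. The reset below therefore fails harmlessly on
+# 5.7 nodes (errors are ignored), and 5.7 replicas are configured through
+# the CHANGE MASTER TO path further down.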
+- name: Reset configuration for all nodes
+  shell: docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "RESET BINARY LOGS AND GTIDS; RESET REPLICA ALL;"
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+  ignore_errors: yes
+
+- name: Configure replica servers (container2-containerN) for MySQL 8.0 and above
+  shell: >
+    docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "
+    CHANGE REPLICATION SOURCE TO
+    SOURCE_HOST='{{ container_prefix }}1',
+    SOURCE_PORT={{ mysql_listen_port }},
+    SOURCE_USER='{{ replication_user }}',
+    SOURCE_PASSWORD='{{ replication_password }}',
+    SOURCE_AUTO_POSITION=1,
+    SOURCE_PUBLIC_KEY_PATH='',
+    GET_SOURCE_PUBLIC_KEY=1;
+    START REPLICA;
+    "
+  loop: "{{ range(2, nodes_count | int + 1) | list }}"
+  when: mysql_version | replace('_', '') | int >= 80
+
+- name: Configure replica servers (container2-containerN) for MySQL 5.7
+  shell: >
+    docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "
+    CHANGE MASTER TO
+    MASTER_HOST='{{ container_prefix }}1',
+    MASTER_PORT={{ mysql_listen_port }},
+    MASTER_USER='{{ replication_user }}',
+    MASTER_PASSWORD='{{ replication_password }}',
+    MASTER_AUTO_POSITION=1;
+    START SLAVE;
+    "
+  loop: "{{ range(2, nodes_count | int + 1) | list }}"
+  when: mysql_version | replace('_', '') | int < 80
+
+- name: Create and seed a test database on primary
+  shell: >
+    docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "
+    CREATE DATABASE testdb;
+    USE testdb;
+    CREATE TABLE testdb (id INT PRIMARY KEY, data VARCHAR(100));
+    INSERT INTO testdb VALUES (1, 'Initial data from node mysql1');
+    "
+- name: Check replication status on replica nodes
+  shell: docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "SHOW REPLICA STATUS\G"
+  register: replication_status
+  loop: "{{ range(2, nodes_count | int + 1) | list }}"
+  changed_when: false
+  when: mysql_version | replace('_', '') | int >= 80
+
+- name: Check replication status on replica nodes
+  shell: docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "SHOW SLAVE STATUS\G"
+  register: replication_status
+  loop: "{{ range(2, nodes_count | int + 1) | list }}"
+  changed_when: false
+  when: mysql_version | replace('_', '') | int < 80
+
+- name: Set verification instructions
+  set_fact:
+    verification_msg: |
+      MySQL Cluster setup complete with asynchronous replication!
+      To verify replication is working:
+      1. Connect to the primary ({{ container_prefix }}1):
+         docker exec -it {{ container_prefix }}1 mysql -uroot -p{{ root_password }}
+      2. Insert data in the test database:
+         USE testdb;
+         INSERT INTO testdb VALUES (100, 'Test replication');
+      3. Connect to replicas and verify data is replicated:
+         docker exec -it {{ container_prefix }}2 mysql -uroot -p{{ root_password }}
+         USE testdb;
+         SELECT * FROM testdb;
+- name: Display verification instructions
+  debug:
+    msg: "{{ verification_msg | split('\n') }}"
diff --git a/pmm_qa/mysql/tasks/mysql-group-replication-setup.yml b/pmm_qa/mysql/tasks/mysql-group-replication-setup.yml
new file mode 100644
index 00000000..6c92966e
--- /dev/null
+++ b/pmm_qa/mysql/tasks/mysql-group-replication-setup.yml
@@ -0,0 +1,123 @@
+- name: Generate my.cnf for each node
+  template:
+    src: ./data/my-group-replication{{ '-57' if mysql_version | replace('_', '') == '57' else '' }}.cnf.j2
+    dest: "{{ data_dir }}/node{{ item }}/my.cnf"
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+- name: Prepare docker container and install Percona Server for MySQL
+  include_tasks: ./tasks/prepare_install_mysql.yml
+  vars:
+    index: "{{ item }}"
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+
+- name: Reset configuration for all nodes for MySQL 8.4
+  shell: >
+    docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "RESET BINARY LOGS AND GTIDS; RESET REPLICA ALL; SET GLOBAL gtid_purged='';"
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+  when: mysql_version | replace('_', '') | int >= 84
+
+- name: Reset configuration for all nodes for MySQL 8.0 and older
+  shell: >
+    docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "RESET MASTER; RESET SLAVE ALL;"
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+  when: mysql_version | replace('_', '') | int < 84
+
+- name: Init configuration for group replication (single exec per node, no binlog) for MySQL 8.0+
+  shell: |
+    docker exec -i {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} <<'EOSQL'
+    SET SQL_LOG_BIN=0;
+    CREATE USER IF NOT EXISTS '{{ replication_user }}'@'%' IDENTIFIED BY '{{ replication_password }}';
+    GRANT REPLICATION SLAVE ON *.* TO '{{ replication_user }}'@'%';
+    GRANT CONNECTION_ADMIN ON *.* TO '{{ replication_user }}'@'%';
+    GRANT BACKUP_ADMIN ON *.* TO '{{ replication_user }}'@'%';
+    GRANT GROUP_REPLICATION_STREAM ON *.* TO '{{ replication_user }}'@'%';
+    GRANT SERVICE_CONNECTION_ADMIN ON *.* TO '{{ replication_user }}'@'%';
+    GRANT SYSTEM_VARIABLES_ADMIN ON *.* TO '{{ replication_user }}'@'%';
+    FLUSH PRIVILEGES;
+    -- Configure recovery channel credentials (host/port are resolved from group membership)
+    CHANGE REPLICATION SOURCE TO
+    SOURCE_USER='{{ replication_user }}',
+    SOURCE_PASSWORD='{{ replication_password }}'
+    FOR CHANNEL 'group_replication_recovery';
+    SET SQL_LOG_BIN=1;
+    EOSQL
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+  when: mysql_version | replace('_', '') | int >= 80
+
+- name: Init configuration for group replication (single exec per node, no binlog) for MySQL 5.7
+  shell: |
+    docker exec -i {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} <<'EOSQL'
+    SET SQL_LOG_BIN=0;
+    CREATE USER IF NOT EXISTS '{{ replication_user }}'@'%' IDENTIFIED BY '{{ replication_password }}';
+    GRANT REPLICATION SLAVE ON *.* TO '{{ replication_user }}'@'%';
+    FLUSH PRIVILEGES;
+    -- Configure recovery channel credentials (host/port are resolved from group membership)
+    CHANGE MASTER TO
+    MASTER_USER='{{ replication_user }}',
+    MASTER_PASSWORD='{{ replication_password }}'
+    FOR CHANNEL 'group_replication_recovery';
+    SET SQL_LOG_BIN=1;
+    EOSQL
+  loop: "{{ range(1, nodes_count | int + 1) | list }}"
+  when: mysql_version | replace('_', '') | int < 80
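+
+# Note on the bootstrap step below: group_replication_bootstrap_group may be
+# enabled on exactly one member, and only for its very first
+# START GROUP_REPLICATION; it is switched back OFF immediately so that a
+# restart of this node rejoins the existing group instead of bootstrapping a
+# second, conflicting group.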
+- name: Bootstrap first node (primary)
+  shell: |
+    docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "SET GLOBAL group_replication_bootstrap_group=ON;"
+    docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "START GROUP_REPLICATION;"
+    docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "SET GLOBAL group_replication_bootstrap_group=OFF;"
+- name: Wait for bootstrap to complete
+  pause:
+    seconds: 10
+
+- name: Start group replication on other nodes
+  shell: |
+    docker exec {{ container_prefix }}{{ item }} mysql -uroot -p{{ root_password }} -e "START GROUP_REPLICATION;"
+  loop: "{{ range(2, nodes_count | int + 1) | list }}"
+
+- name: Wait 10 seconds for the other nodes to join
+  pause:
+    seconds: 10
+
+- name: Create and seed a test database on primary
+  shell: >
+    docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "
+    CREATE DATABASE testdb;
+    USE testdb;
+    CREATE TABLE testdb (id INT PRIMARY KEY, data VARCHAR(100));
+    INSERT INTO testdb VALUES (1, 'Initial data from node mysql1');
+    "
+- name: Check replication status on first node
+  shell: docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "SELECT * FROM performance_schema.replication_group_members;"
+  register: replication_status
+
+- name: Display replication status
+  debug:
+    var: replication_status.stdout
+
+- name: Check replication group members count
+  shell: docker exec {{ container_prefix }}1 mysql -uroot -p{{ root_password }} -e "SELECT COUNT(*) AS count FROM performance_schema.replication_group_members;"
+  register: member_count
+
+- name: Display member count
+  debug:
+    var: member_count.stdout
+
+- name: Set verification instructions
+  set_fact:
+    verification_msg: |
+      MySQL Cluster setup complete!
+      To verify replication is working:
+      1. Connect to the first node:
+         docker exec -it {{ container_prefix }}1 mysql -uroot -p{{ root_password }}
+      2. Insert data in the test database:
+         USE testdb;
+         INSERT INTO testdb VALUES (100, 'Test replication');
+      3. 
Connect to other nodes and verify data is replicated: + docker exec -it {{ container_prefix }}2 mysql -uroot -p{{ root_password }} + USE testdb; + SELECT * FROM testdb; + +- name: Display verification instructions + debug: + msg: "{{ verification_msg | split('\n') }}" diff --git a/pmm_qa/mysql/tasks/mysql-single.yml b/pmm_qa/mysql/tasks/mysql-single.yml new file mode 100644 index 00000000..7040cf3f --- /dev/null +++ b/pmm_qa/mysql/tasks/mysql-single.yml @@ -0,0 +1,11 @@ +- name: Generate my.cnf for each node + template: + src: ./data/my.cnf.j2 + dest: "{{ data_dir }}/node{{ item }}/my.cnf" + loop: "{{ range(1, nodes_count | int + 1) | list }}" + +- name: Prepare docker container and install Percona Server for MySQL + include_tasks: ./tasks/prepare_install_mysql.yml + vars: + index: "{{ item }}" + loop: "{{ range(1, nodes_count | int + 1) | list }}" diff --git a/pmm_qa/mysql/tasks/prepare_install_mysql.yml b/pmm_qa/mysql/tasks/prepare_install_mysql.yml new file mode 100644 index 00000000..be24a9c6 --- /dev/null +++ b/pmm_qa/mysql/tasks/prepare_install_mysql.yml @@ -0,0 +1,123 @@ +- name: Prepare Container for MySQL 8.0+ + shell: | + docker run --rm -d --name="{{ container_prefix }}{{ item }}" \ + --network="pmm-qa" \ + --privileged --cgroupns=host -v /sys/fs/cgroup:/sys/fs/cgroup:rw \ + -v /var/lib/containerd \ + -v ./ssl:/ssl \ + antmelekhin/docker-systemd:ubuntu-24.04 + when: mysql_version | replace('_', '') | int >= 80 + +- name: Prepare Container for MySQL 5.7 + shell: | + docker run --rm -d --name="{{ container_prefix }}{{ item }}" \ + --network="pmm-qa" \ + --privileged --cgroupns=host -v /sys/fs/cgroup:/sys/fs/cgroup:rw \ + -v /var/lib/containerd \ + -v ./ssl:/ssl \ + antmelekhin/docker-systemd:ubuntu-22.04 + when: mysql_version | replace('_', '') | int < 80 + +- name: Install dependencies + shell: | + docker exec {{ container_prefix }}{{ index }} apt-get update + docker exec {{ container_prefix }}{{ index }} apt-get -y install wget gnupg2 lsb-release curl xz-utils libnuma1 +- name: Install MySQL 8.0+ dependencies + shell: | + docker exec {{ container_prefix }}{{ index }} apt-get update + docker exec {{ container_prefix }}{{ index }} apt-get install -y libncurses6 libaio1t64 + docker exec {{ container_prefix }}{{ index }} ln -s /usr/lib/x86_64-linux-gnu/libaio.so.1t64 /usr/lib/x86_64-linux-gnu/libaio.so.1 + when: mysql_version | replace('_', '') | int >= 80 + +- name: Install MySQL 5.7 dependencies + shell: | + docker exec {{ container_prefix }}{{ index }} apt-get update + docker exec {{ container_prefix }}{{ index }} apt-get install -y libaio1 libncurses5 + when: mysql_version | replace('_', '') | int == 57 + +- name: Query Docker Hub for MySQL tags + uri: + url: "https://hub.docker.com/v2/repositories/library/mysql/tags?page_size=100&name={{ mysql_version | replace('_', '.') }}" + method: GET + return_content: yes + register: mysql_tags_response + +- name: Extract tag names (only versions) + set_fact: + mysql_exact_version: "{{ mysql_tags_response.json.results | map(attribute='name') | list | select('match', '^[0-9]+\\.[0-9]+\\.[0-9]+$') | first }}" + +- name: Install MySQL 8.0+ + shell: | + docker exec {{ container_prefix }}{{ index }} wget -q -O mysql.tar.xz https://dev.mysql.com/get/Downloads/MySQL-{{ mysql_version | replace('_', '.') }}/mysql-{{ mysql_exact_version }}-linux-glibc2.28-x86_64.tar.xz + docker exec {{ container_prefix }}{{ index }} tar -xf mysql.tar.xz + docker exec {{ container_prefix }}{{ index }} groupadd mysql + docker exec {{ container_prefix }}{{ index }} 
useradd -r -g mysql -s /bin/false mysql
+    docker exec {{ container_prefix }}{{ index }} mv mysql-{{ mysql_exact_version }}-linux-glibc2.28-x86_64 /usr/local/mysql
+    docker exec {{ container_prefix }}{{ index }} ln -s /usr/local/mysql-{{ mysql_exact_version }}-linux-glibc2.28-x86_64 mysql
+    docker exec {{ container_prefix }}{{ index }} chown -R mysql:mysql /usr/local/mysql
+    docker exec {{ container_prefix }}{{ index }} mkdir /usr/local/mysql/data
+    docker exec {{ container_prefix }}{{ index }} chown mysql:mysql /usr/local/mysql/data
+    docker exec {{ container_prefix }}{{ index }} /usr/local/mysql/bin/mysqld --initialize --user=mysql --basedir=/usr/local/mysql --datadir=/usr/local/mysql/data
+    docker exec {{ container_prefix }}{{ index }} cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysql
+    docker exec {{ container_prefix }}{{ index }} chmod +x /etc/init.d/mysql
+    docker exec {{ container_prefix }}{{ index }} sh -c "echo 'export PATH=/usr/local/mysql/bin:\$PATH' >> /etc/profile"
+    docker exec {{ container_prefix }}{{ index }} ln -s /usr/local/mysql/bin/mysql /usr/bin/mysql
+    docker exec {{ container_prefix }}{{ index }} bash -c 'source ~/.bashrc'
+  register: mysql_80_root_password
+  when: mysql_version | replace('_', '') | int >= 80
+
+- name: Install MySQL 5.7
+  shell: |
+    docker exec {{ container_prefix }}{{ index }} wget -q -O mysql.tar.gz https://downloads.mysql.com/archives/get/p/23/file/mysql-5.7.44-linux-glibc2.12-x86_64.tar.gz
+    docker exec {{ container_prefix }}{{ index }} tar zxf mysql.tar.gz
+    docker exec {{ container_prefix }}{{ index }} groupadd mysql
+    docker exec {{ container_prefix }}{{ index }} useradd -r -g mysql -s /bin/false mysql
+    docker exec {{ container_prefix }}{{ index }} mv mysql-5.7.44-linux-glibc2.12-x86_64 /usr/local/mysql
+    docker exec {{ container_prefix }}{{ index }} ln -s /usr/local/mysql-5.7.44-linux-glibc2.12-x86_64 mysql
+    docker exec {{ container_prefix }}{{ index }} chown -R mysql:mysql /usr/local/mysql
+    docker exec {{ container_prefix }}{{ index }} mkdir /usr/local/mysql/data
+    docker exec {{ container_prefix }}{{ index }} chown mysql:mysql /usr/local/mysql/data
+    docker exec {{ container_prefix }}{{ index }} /usr/local/mysql/bin/mysqld --initialize --user=mysql --basedir=/usr/local/mysql --datadir=/usr/local/mysql/data
+    docker exec {{ container_prefix }}{{ index }} cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysql
+    docker exec {{ container_prefix }}{{ index }} chmod +x /etc/init.d/mysql
+    docker exec {{ container_prefix }}{{ index }} sh -c "echo 'export PATH=/usr/local/mysql/bin:\$PATH' >> /etc/profile"
+    docker exec {{ container_prefix }}{{ index }} ln -s /usr/local/mysql/bin/mysql /usr/bin/mysql
+    docker exec {{ container_prefix }}{{ index }} bash -c 'source ~/.bashrc'
+  register: mysql_57_root_password
+  when: mysql_version | replace('_', '') | int == 57
+
+- debug: var=mysql_80_root_password
+  when: mysql_version | replace('_', '') | int >= 80
+
+- name: Store MySQL root password for MySQL 8.0+
+  set_fact:
+    mysql_root_password: "{{ mysql_80_root_password.stderr | regex_search('root@localhost: (.+)', '\\1') | first }}"
+  when: mysql_version | replace('_', '') | int >= 80
+
+- name: Store MySQL root password for MySQL 5.7
+  set_fact:
+    mysql_root_password: "{{ mysql_57_root_password.stderr | regex_search('root@localhost: (.+)', '\\1') | first }}"
+  when: mysql_version | replace('_', '') | int == 57
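+
+# Note: "mysqld --initialize" generates an expired temporary root password and
+# logs it to stderr as "A temporary password is generated for root@localhost: ...",
+# which is what the set_fact tasks above extract from .stderr with regex_search;
+# the later password-change tasks must therefore connect with
+# --connect-expired-password.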
+- name: Start MySQL
+  shell: |
+    docker exec {{ container_prefix }}{{ index }} systemctl enable mysql
+    docker exec {{ container_prefix }}{{ index }} systemctl start mysql
+- name: Copy config file to docker container
+  shell: |
+    docker exec {{ container_prefix }}{{ item }} mkdir -p /etc/mysql
+    docker cp {{ data_dir }}/node{{ index }}/my.cnf {{ container_prefix }}{{ item }}:/etc/mysql/my.cnf
+- name: Restart Percona Server for MySQL
+  shell: docker exec {{ container_prefix }}{{ index }} systemctl restart mysql
+
+- name: Wait 5 seconds for MySQL to start
+  pause:
+    seconds: 5
+
+- name: Change root password Percona Server for MySQL 5.7
+  shell: "docker exec {{ container_prefix }}{{ index }} mysql --connect-expired-password -uroot -p'{{ mysql_root_password }}' -e \"ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY '{{ root_password }}';\""
+  when: mysql_version|replace('_', '')|int < 80
+
+- name: Change root password Percona Server for MySQL 8.0+
+  shell: "docker exec {{ container_prefix }}{{ index }} mysql --connect-expired-password -uroot -p'{{ mysql_root_password }}' -e \"ALTER USER 'root'@'localhost' IDENTIFIED WITH caching_sha2_password BY '{{ root_password }}';\""
+  when: mysql_version|replace('_', '')|int >= 80
diff --git a/pmm_qa/pmm-framework.py b/pmm_qa/pmm-framework.py
index 5084b621..641d7a1b 100755
--- a/pmm_qa/pmm-framework.py
+++ b/pmm_qa/pmm-framework.py
@@ -89,7 +89,6 @@ def setup_mysql(db_type, db_version=None, db_config=None, args=None):
 
     # Gather Version details
     ms_version = os.getenv('MS_VERSION') or db_version or database_configs[db_type]["versions"][-1]
-    ms_version_int = int(ms_version.replace(".", ""))
 
     # Check Setup Types
     setup_type = ''
@@ -107,6 +106,7 @@ def setup_mysql(db_type, db_version=None, db_config=None, args=None):
         'GROUP_REPLICATION': setup_type,
         'MS_NODES': no_of_nodes,
         'MS_VERSION': ms_version,
+        'SETUP_TYPE': setup_type_value,
         'PMM_SERVER_IP': args.pmm_server_ip or container_name or '127.0.0.1',
         'MS_CONTAINER': 'mysql_pmm_' + str(ms_version),
         'CLIENT_VERSION': get_value('CLIENT_VERSION', db_type, args, db_config),
@@ -116,11 +116,7 @@ def setup_mysql(db_type, db_version=None, db_config=None, args=None):
         'PMM_QA_GIT_BRANCH': os.getenv('PMM_QA_GIT_BRANCH') or 'v3'
     }
 
-    # Ansible playbook filename
-    playbook_filename = 'ms_pmm_setup.yml'
-
-    # Call the function to run the Ansible playbook
-    run_ansible_playbook(playbook_filename, env_vars, args)
+    run_ansible_playbook('mysql/mysql-setup.yml', env_vars, args)
 
 def setup_ssl_mysql(db_type, db_version=None, db_config=None, args=None):
     # Check if PMM server is running

From 29ca32a59c11525d227daeac1f4dc1ecd4bdec99 Mon Sep 17 00:00:00 2001
From: Peter Sirotnak
Date: Mon, 24 Nov 2025 15:26:10 +0100
Subject: [PATCH 05/39] PMM-13992: Remove old Percona server setup

---
 .../data/init-async-replication.sql.j2        |  13 -
 .../data/init-group-replication.sql.j2        |  15 -
 .../data/my-async-replication.cnf.j2          |  43 ---
 .../data/my-group-replication.cnf.j2          |  45 ---
 pmm_qa/percona_server/data/my.cnf.j2          |   2 -
 pmm_qa/percona_server/data/ps_load.sql        |  94 ------
 .../percona_server/percona-server-setup.yml   | 269 ------------------
 ...percona-server-async-replication-setup.yml | 129 ---------
 ...percona-server-group-replication-setup.yml | 127 ---------
 .../tasks/percona-server-setup-single.yml     |  31 --
 10 files changed, 768 deletions(-)
 delete mode 100644 pmm_qa/percona_server/data/init-async-replication.sql.j2
 delete mode 100644 pmm_qa/percona_server/data/init-group-replication.sql.j2
 delete mode 100644 pmm_qa/percona_server/data/my-async-replication.cnf.j2
 delete mode 100644
pmm_qa/percona_server/data/my-group-replication.cnf.j2 delete mode 100644 pmm_qa/percona_server/data/my.cnf.j2 delete mode 100644 pmm_qa/percona_server/data/ps_load.sql delete mode 100644 pmm_qa/percona_server/percona-server-setup.yml delete mode 100644 pmm_qa/percona_server/tasks/percona-server-async-replication-setup.yml delete mode 100644 pmm_qa/percona_server/tasks/percona-server-group-replication-setup.yml delete mode 100644 pmm_qa/percona_server/tasks/percona-server-setup-single.yml diff --git a/pmm_qa/percona_server/data/init-async-replication.sql.j2 b/pmm_qa/percona_server/data/init-async-replication.sql.j2 deleted file mode 100644 index a96ef137..00000000 --- a/pmm_qa/percona_server/data/init-async-replication.sql.j2 +++ /dev/null @@ -1,13 +0,0 @@ --- Create replication user and grant necessary privileges -SET SQL_LOG_BIN=0; -CREATE USER '{{ replication_user }}'@'%' IDENTIFIED WITH 'caching_sha2_password' BY '{{ replication_password }}' REQUIRE NONE; -GRANT REPLICATION SLAVE ON *.* TO '{{ replication_user }}'@'%'; -GRANT CONNECTION_ADMIN ON *.* TO '{{ replication_user }}'@'%'; -GRANT BACKUP_ADMIN ON *.* TO '{{ replication_user }}'@'%'; -FLUSH PRIVILEGES; -SET SQL_LOG_BIN=1; - -{% if item == 1 %} --- Primary server: enable binary logging for replication -FLUSH BINARY LOGS; -{% endif %} diff --git a/pmm_qa/percona_server/data/init-group-replication.sql.j2 b/pmm_qa/percona_server/data/init-group-replication.sql.j2 deleted file mode 100644 index 19185831..00000000 --- a/pmm_qa/percona_server/data/init-group-replication.sql.j2 +++ /dev/null @@ -1,15 +0,0 @@ --- Create replication user and grant necessary privileges -SET SQL_LOG_BIN=0; -CREATE USER '{{ replication_user }}'@'%' IDENTIFIED BY '{{ replication_password }}'; -GRANT REPLICATION SLAVE ON *.* TO '{{ replication_user }}'@'%'; -GRANT CONNECTION_ADMIN ON *.* TO '{{ replication_user }}'@'%'; -GRANT BACKUP_ADMIN ON *.* TO '{{ replication_user }}'@'%'; -GRANT GROUP_REPLICATION_STREAM ON *.* TO '{{ replication_user }}'@'%'; --- GRANT SERVICE_CONNECTION_ADMIN ON *.* TO '{{ replication_user }}'@'%'; --- GRANT SYSTEM_VARIABLES_ADMIN ON *.* TO '{{ replication_user }}'@'%'; -FLUSH PRIVILEGES; -SET SQL_LOG_BIN=1; - --- Configure group replication recovery credentials -CHANGE REPLICATION SOURCE TO SOURCE_USER='{{ replication_user }}', SOURCE_PASSWORD='{{ replication_password }}' FOR CHANNEL 'group_replication_recovery'; - diff --git a/pmm_qa/percona_server/data/my-async-replication.cnf.j2 b/pmm_qa/percona_server/data/my-async-replication.cnf.j2 deleted file mode 100644 index 014d37f7..00000000 --- a/pmm_qa/percona_server/data/my-async-replication.cnf.j2 +++ /dev/null @@ -1,43 +0,0 @@ -[mysqld] -# General server configuration -server_id={{ item }} -bind-address=0.0.0.0 -port={{ mysql_listen_port }} -userstat=1 - -# Authentication settings for caching_sha2_password -caching_sha2_password_auto_generate_rsa_keys=ON -# The following two parameters tell MySQL where to store the RSA key pair -caching_sha2_password_private_key_path=private_key.pem -caching_sha2_password_public_key_path=public_key.pem - -# Replication settings -gtid_mode=ON -enforce_gtid_consistency=ON -log_bin=binlog -log_replica_updates=ON -sync_binlog=1 -binlog_checksum=NONE -disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY" -# MacOS-specific, where table names are case-sensitive -lower_case_table_names=2 - -# MySQL 8.4 compatibility settings -report_host=ps_pmm_{{ ps_version }}_{{ item }} - -# Replica configuration - applies to all nodes except primary (they'll 
be able to become replicas) -{% if item != 1 %} -# Replica specific settings -replica_parallel_workers=4 -replica_parallel_type=LOGICAL_CLOCK -replica_preserve_commit_order=1 -{% endif %} - -# Crash-safe replication settings -relay-log=ps_pmm_{{ ps_version }}_{{ item }}-relay-bin -relay_log_recovery=ON -relay_log_purge=ON - -# Performance and connection settings -max_connections=1000 -innodb_buffer_pool_size=256M diff --git a/pmm_qa/percona_server/data/my-group-replication.cnf.j2 b/pmm_qa/percona_server/data/my-group-replication.cnf.j2 deleted file mode 100644 index 8fcdbbf6..00000000 --- a/pmm_qa/percona_server/data/my-group-replication.cnf.j2 +++ /dev/null @@ -1,45 +0,0 @@ -[mysqld] -# General server configuration -server_id={{ server_id_start + item - 1 }} -bind-address=0.0.0.0 -port={{ mysql_listen_port }} -userstat=1 - -# General replication settings -gtid_mode=ON -enforce_gtid_consistency=ON -binlog_checksum=NONE -log_bin=binlog -log_replica_updates=ON -disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY" -lower_case_table_names=2 # MacOS-specific, but also good generally - -# MySQL 8.4 compatibility settings -report_host=ps_pmm_{{ps_version}}_{{ item }} - -# Group Replication Settings -plugin_load_add='group_replication.so' -loose-group_replication_group_name='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' -loose-group_replication_local_address='ps_pmm_{{ps_version}}_{{ item }}:{{ group_seeds_port }}' -loose-group_replication_group_seeds='{% for i in range(1, nodes_count | int + 1) %}ps_pmm_{{ps_version}}_{{ i }}:{{ group_seeds_port }}{% if not loop.last %},{% endif %}{% endfor %}' -loose-group_replication_communication_stack=XCOM - -# Group replication behavior -loose-group_replication_start_on_boot=OFF -loose-group_replication_bootstrap_group=OFF -loose-group_replication_single_primary_mode=ON -loose-group_replication_enforce_update_everywhere_checks=OFF - -# Recovery settings -loose-group_replication_recovery_get_public_key=ON -loose-group_replication_recovery_retry_count=10 -loose-group_replication_recovery_reconnect_interval=60 - -# Crash-safe replication settings -relay-log=ps_pmm_{{ps_version}}_{{ item }}-relay-bin -relay_log_recovery=ON -relay_log_purge=ON - -# Performance and connection settings -max_connections=1000 -innodb_buffer_pool_size=256M diff --git a/pmm_qa/percona_server/data/my.cnf.j2 b/pmm_qa/percona_server/data/my.cnf.j2 deleted file mode 100644 index fd4b27f2..00000000 --- a/pmm_qa/percona_server/data/my.cnf.j2 +++ /dev/null @@ -1,2 +0,0 @@ -[mysqld] -userstat=1 \ No newline at end of file diff --git a/pmm_qa/percona_server/data/ps_load.sql b/pmm_qa/percona_server/data/ps_load.sql deleted file mode 100644 index 6df95275..00000000 --- a/pmm_qa/percona_server/data/ps_load.sql +++ /dev/null @@ -1,94 +0,0 @@ --- ======================================== --- CREATE TABLES --- ======================================== - -CREATE TABLE students ( - student_id INT AUTO_INCREMENT PRIMARY KEY, - first_name VARCHAR(50), - last_name VARCHAR(50), - birth_date DATE -); - -CREATE TABLE classes ( - class_id INT AUTO_INCREMENT PRIMARY KEY, - name VARCHAR(100), - teacher VARCHAR(100) -); - -CREATE TABLE enrollments ( - enrollment_id INT AUTO_INCREMENT PRIMARY KEY, - student_id INT, - class_id INT, - enrollment_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - FOREIGN KEY (student_id) REFERENCES students(student_id), - FOREIGN KEY (class_id) REFERENCES classes(class_id) -); - --- ======================================== --- INSERT INITIAL DATA --- 
======================================== - -INSERT INTO students (first_name, last_name, birth_date) VALUES -('Alice', 'Smith', '2005-04-10'), -('Bob', 'Johnson', '2006-08-15'), -('Charlie', 'Brown', '2004-12-01'); - -INSERT INTO classes (name, teacher) VALUES -('Mathematics', 'Mrs. Taylor'), -('History', 'Mr. Anderson'), -('Science', 'Dr. Reynolds'); - -INSERT INTO enrollments (student_id, class_id) VALUES -(1, 1), -(1, 2), -(2, 2), -(3, 1), -(3, 3); - --- ======================================== --- SELECT: View all data after insert --- ======================================== - --- View all students -SELECT * FROM students; - --- View all classes -SELECT * FROM classes; - --- View all enrollments -SELECT * FROM enrollments; - --- View students enrolled in Mathematics -SELECT s.first_name, s.last_name -FROM students s -JOIN enrollments e ON s.student_id = e.student_id -JOIN classes c ON e.class_id = c.class_id -WHERE c.name = 'Mathematics'; - --- Count students per class -SELECT c.name AS class_name, COUNT(e.student_id) AS student_count -FROM classes c -LEFT JOIN enrollments e ON c.class_id = e.class_id -GROUP BY c.name; - --- ======================================== --- UPDATE DATA --- ======================================== - -UPDATE students -SET last_name = 'Williams' -WHERE first_name = 'Bob' AND last_name = 'Johnson'; - -UPDATE classes -SET teacher = 'Ms. Carter' -WHERE name = 'History'; - --- ======================================== --- DELETE DATA --- ======================================== - -DELETE FROM enrollments -WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'); - -DELETE FROM students -WHERE first_name = 'Alice' AND last_name = 'Smith'; \ No newline at end of file diff --git a/pmm_qa/percona_server/percona-server-setup.yml b/pmm_qa/percona_server/percona-server-setup.yml deleted file mode 100644 index e4e00a75..00000000 --- a/pmm_qa/percona_server/percona-server-setup.yml +++ /dev/null @@ -1,269 +0,0 @@ ---- -# Percona Server 8.4 and higher single instance and also Cluster with Group Replication -- name: Setup Percona Server 8.4 and higher. 
Cluster with Group Replication in Docker - hosts: localhost - connection: local - gather_facts: yes - vars: - ps_version: "{{ lookup('env', 'PS_VERSION') | default('8.4', true) }}" - cluster_name: "mysql_cluster" - replication_user: "repl_user" - replication_password: "GRgrO9301RuF" - root_password: "GRgrO9301RuF" - mysql_port: 33066 - mysql_listen_port: 3306 - group_seeds_port: 34061 - nodes_count: "{{ (lookup('env', 'NODES_COUNT') | default('3', true)) | int }}" - network_name: "pmm-qa" - data_dir: "{{ lookup('env', 'HOME') }}/mysql_cluster_data" - server_id_start: 1 - pmm_server_ip: "{{ lookup('vars', 'extra_pmm_server_ip', default=lookup('env','PMM_SERVER_IP') | default('127.0.0.1', true) ) }}" - client_version: "{{ lookup('vars', 'extra_client_version', default=lookup('env','CLIENT_VERSION') | default('3-dev-latest', true) ) }}" - admin_password: "{{ lookup('vars', 'extra_admin_password', default=lookup('env','ADMIN_PASSWORD') | default('admin', true) ) }}" - query_source: "{{ lookup('env', 'QUERY_SOURCE') | default('perfschema', true) }}" - metrics_mode: "{{ lookup('env', 'metrics_mode') }}" - setup_type: "{{ lookup('env', 'SETUP_TYPE') }}" - random_service_name_value: "" - my_rocks: "{{ lookup('env', 'MY_ROCKS') | default(false, true) }}" - - - tasks: - - name: Mofidy the node count for group replication - set_fact: - nodes_count: 3 - when: nodes_count | int < 3 and setup_type == "gr" - - - name: Chance to correct nodes count for async replication - set_fact: - nodes_count: 2 - when: nodes_count | int < 2 and setup_type == "replication" - - - name: Create Docker network - community.docker.docker_network: - name: "{{ network_name }}" - state: present - - - name: Remove old data folders - shell: 'rm -fr {{ data_dir }}' - loop: "{{ range(1, nodes_count | int + 1) | list }}" - - - name: Create data directories - file: - path: "{{ data_dir }}/node{{ item }}/data" - state: directory - mode: '0755' - loop: "{{ range(1, nodes_count | int + 1) | list }}" - - - name: Remove old percona server containers - community.docker.docker_container: - name: "ps_pmm_{{ ps_version }}_{{ item }}" - image: "percona/percona-server:{{ ps_version }}" - restart_policy: always - state: absent - loop: "{{ range(1, nodes_count | int + 1) | list }}" - ignore_errors: yes - - - name: Recursively change ownership of a directory - shell: "sudo chown -R 1001:1001 {{ data_dir }}/node{{ item }}/data" - loop: "{{ range(1, nodes_count | int + 1) | list }}" - - - name: Setup Percona Server group replication - include_tasks: ./tasks/percona-server-group-replication-setup.yml - when: setup_type == "gr" - - - name: Setup Percona Server with async replication - include_tasks: ./tasks/percona-server-async-replication-setup.yml - when: setup_type == "replication" - - - name: Setup Percona Server - include_tasks: tasks/percona-server-setup-single.yml - when: setup_type != "gr" and setup_type != "replication" - - - name: Wait 10 seconds for setup to finish - pause: - seconds: 10 - - - name: Create slowlog configuration for mysql nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: > - mysql -uroot -p{{ root_password }} -e " - SET GLOBAL slow_query_log='ON'; - SET GLOBAL long_query_time=0; - SET GLOBAL log_slow_admin_statements=ON; - SET GLOBAL log_slow_slave_statements=ON; - " - loop: "{{ range(1, nodes_count | int + 1) | list }}" - when: query_source == "slowlog" - - - name: Install and add pmm client. 
- include_tasks: ../tasks/install_pmm_client.yml - vars: - container_name: "ps_pmm_{{ ps_version }}_{{ item }}" - loop: "{{ range(1, nodes_count | int + 1) | list }}" - - - name: Generate random service name suffix - set_fact: - random_service_name_value: "_{{ 99999 | random + 1 }}" - - - name: Add service to pmm server - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --environment=ps-gr-dev --cluster=ps-gr-dev-cluster --replication-set=ps-gr-replication ps_pmm_{{ ps_version }}_{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:3306 - loop: "{{ range(1, nodes_count | int + 1) | list }}" - when: setup_type == "gr" - - - name: Add service to pmm server - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --environment=ps-replication-dev --cluster=ps-replication-dev-cluster --replication-set=ps-async-replication ps_pmm_{{ ps_version }}_{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:3306 - loop: "{{ range(1, nodes_count | int + 1) | list }}" - when: setup_type == "replication" - - - name: Add service to pmm server - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: pmm-admin add mysql --query-source={{ query_source }} --username=root --password={{ root_password }} --cluster=ps-single-dev-cluster --environment=ps-dev ps_pmm_{{ ps_version }}_{{ item }}{{ random_service_name_value }} --debug 127.0.0.1:3306 - loop: "{{ range(1, nodes_count | int + 1) | list }}" - when: setup_type != "gr" and setup_type != "replication" - - - name: Install sysbench inside of all percona server nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - user: "root" - command: > - /bin/sh -c " - wget -O epel-release.rpm --progress=dot:giga https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && - rpm -i epel-release.rpm && - microdnf install -y sysbench - " - loop: "{{ range(1, nodes_count | int + 1) | list }}" - - - name: Prepare sysbench inside of all percona server nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: > - mysql -uroot -p{{ root_password }} -e " - SET GLOBAL super_read_only = OFF; - SET GLOBAL read_only = OFF; - " - loop: "{{ range(1, nodes_count | int + 1) | list }}" - - - name: Prepare sysbench inside of all percona server nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: > - mysql -uroot -p{{ root_password }} -e " - CREATE DATABASE sbtest; - CREATE USER 'sbtest'@'localhost' IDENTIFIED BY 'password'; - GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'localhost'; - CREATE USER 'sbtest'@'127.0.0.1' IDENTIFIED BY 'password'; - GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'127.0.0.1'; - FLUSH PRIVILEGES; - " - loop: "{{ range(1, nodes_count | int + 1) | list }}" - when: setup_type != "gr" and setup_type != "replication" - - - name: Prepare sysbench inside of all percona server nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_1" - command: > - mysql -uroot -p{{ root_password }} -e " - CREATE DATABASE sbtest; - CREATE USER 'sbtest'@'localhost' IDENTIFIED BY 'password'; - GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'localhost'; - CREATE USER 
'sbtest'@'127.0.0.1' IDENTIFIED BY 'password'; - GRANT ALL PRIVILEGES ON *.* TO 'sbtest'@'127.0.0.1'; - FLUSH PRIVILEGES; - " - when: setup_type == "gr" or setup_type == "replication" - - - name: Prepare data for sysbench inside of all percona server nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: > - sysbench /usr/share/sysbench/oltp_read_write.lua - --mysql-host=127.0.0.1 - --mysql-port=3306 - --mysql-user=sbtest - --mysql-password=password - --mysql-db=sbtest - --tables=10 - --table-size=100000 - prepare - when: setup_type != "gr" and setup_type != "replication" - loop: "{{ range(1, nodes_count | int + 1) | list }}" - - - name: Prepare data for sysbench inside of first percona server nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_1" - command: > - sysbench /usr/share/sysbench/oltp_read_write.lua - --mysql-host=127.0.0.1 - --mysql-port=3306 - --mysql-user=sbtest - --mysql-password=password - --mysql-db=sbtest - --tables=10 - --table-size=100000 - prepare - when: setup_type == "gr" or setup_type == "replication" - - - name: Run load for sysbench inside of all percona server nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: > - sysbench /usr/share/sysbench/oltp_read_write.lua - --mysql-host=127.0.0.1 - --mysql-port=3306 - --mysql-user=sbtest - --mysql-password=password - --mysql-db=sbtest - --tables=10 - --table-size=100000 - --threads=16 - --time=60 - run - loop: "{{ range(1, nodes_count | int + 1) | list }}" - - - name: Copy a load file into the container - community.docker.docker_container_copy_into: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - path: ../data/mysql_load.sql - container_path: /ps_load.sql - loop: "{{ range(1, nodes_count | int + 1) | list }}" - - - name: Wait 10 seconds for node to be connected - pause: - seconds: 10 - - - name: Run load inside of first percona server node - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_1" - command: > - /bin/sh -c ' - mysql -uroot -p{{ root_password }} -e "CREATE DATABASE school;" - mysql -uroot -p{{ root_password }} school < /ps_load.sql - ' - when: setup_type == "gr" or setup_type == "replication" - - - name: Run load inside of all percona server nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: > - /bin/sh -c ' - mysql -uroot -p{{ root_password }} -e "CREATE DATABASE school;" - mysql -uroot -p{{ root_password }} school < /ps_load.sql - ' - loop: "{{ range(1, nodes_count | int + 1) | list }}" - when: setup_type != "gr" and setup_type != "replication" - - - name: Enable MySQL MyRocks - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: > - /bin/sh -c ' - ps-admin --enable-rocksdb -u root -p{{ root_password }} - ' - loop: "{{ range(1, nodes_count | int + 1) | list }}" - when: my_rocks | bool diff --git a/pmm_qa/percona_server/tasks/percona-server-async-replication-setup.yml b/pmm_qa/percona_server/tasks/percona-server-async-replication-setup.yml deleted file mode 100644 index 67cebb47..00000000 --- a/pmm_qa/percona_server/tasks/percona-server-async-replication-setup.yml +++ /dev/null @@ -1,129 +0,0 @@ -- name: Generate my.cnf for each node - template: - src: ./data/my-async-replication.cnf.j2 - dest: "{{ data_dir }}/node{{ item }}/my.cnf" - loop: "{{ range(1, nodes_count | int + 1) | list }}" - -- name: Create initialization script for 
each node - template: - src: ./data/init-async-replication.sql.j2 - dest: "{{ data_dir }}/node{{ item }}/init.sql" - loop: "{{ range(1, nodes_count | int + 1) | list }}" - -- name: Start Percona Server containers with async replication - community.docker.docker_container: - name: "ps_pmm_{{ ps_version }}_{{ item }}" - image: "percona/percona-server:{{ ps_version }}" - restart_policy: always - state: started - networks: - - name: "{{ network_name }}" - env: - MYSQL_ROOT_PASSWORD: "{{ root_password }}" - ports: - - "{{ mysql_port + item - 1 }}:{{ mysql_listen_port }}" - volumes: - - "{{ data_dir }}/node{{ item }}/data:/var/lib/mysql" - - "{{ data_dir }}/node{{ item }}/my.cnf:/etc/mysql/my.cnf" - - "{{ data_dir }}/node{{ item }}/init.sql:/docker-entrypoint-initdb.d/init.sql" - loop: "{{ range(1, nodes_count | int + 1) | list }}" - -- name: Wait for MySQL to be available - wait_for: - host: localhost - port: "{{ mysql_port + item - 1 }}" - delay: 10 - timeout: 300 - loop: "{{ range(1, nodes_count | int + 1) | list }}" - -- name: Wait 5 seconds for percona server start to complete - pause: - seconds: 5 - -- name: Reset configuration for all nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: > - mysql -uroot -p{{ root_password }} -e " - RESET BINARY LOGS AND GTIDS; - RESET REPLICA ALL; - " - loop: "{{ range(1, nodes_count | int + 1) | list }}" - ignore_errors: yes - -- name: Get primary ps_pmm_{{ ps_version }}_1 binary log status - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_1" - command: > - mysql -uroot -p{{ root_password }} -e " - SHOW BINARY LOG STATUS\G - " - register: primary_status - changed_when: false - - -- name: Display binary log status for primary - debug: - msg: "{{ primary_status.stdout | split('\n') }}" - -- name: Configure replica servers (container2-containerN) - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: > - mysql -uroot -p{{ root_password }} -e " - CHANGE REPLICATION SOURCE TO - SOURCE_HOST='ps_pmm_{{ ps_version }}_1', - SOURCE_PORT={{ mysql_listen_port }}, - SOURCE_USER='{{ replication_user }}', - SOURCE_PASSWORD='{{ replication_password }}', - SOURCE_AUTO_POSITION=1, - SOURCE_PUBLIC_KEY_PATH='', - GET_SOURCE_PUBLIC_KEY=1; - START REPLICA; - " - loop: "{{ range(2, nodes_count | int + 1) | list }}" - -- name: Create and seed a test database on primary - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_1" - command: > - mysql -uroot -p{{ root_password}} -e " - CREATE DATABASE testdb; - USE testdb; - CREATE TABLE testdb (id INT PRIMARY KEY, data VARCHAR(100)); - INSERT INTO testdb VALUES (1, 'Initial data from node mysql1');" - -- name: Check replication status on replica nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: mysql -uroot -p{{ root_password }} -e "SHOW REPLICA STATUS\G" - register: replication_status - loop: "{{ range(2, nodes_count | int + 1) | list }}" - changed_when: false - -- name: Display replication status for each replica - debug: - msg: "{{ replication_status.results[item - 2].stdout_lines }}" - loop: "{{ range(2, nodes_count | int + 1) | list }}" - -- name: Set verification instructions - set_fact: - verification_msg: | - MySQL Cluster setup complete with asynchronous replication! - - To verify replication is working: - 1. 
Connect to the primary (ps_pmm_{{ ps_version }}_1): - docker exec -it ps_pmm_{{ ps_version }}_1 mysql -uroot -p{{ root_password }} - - 2. Insert data in the test database: - USE testdb; - INSERT INTO testdb VALUES (100, 'Test replication'); - - 3. Connect to replicas and verify data is replicated: - docker exec -it ps_pmm_{{ ps_version }}_2 mysql -uroot -p{{ root_password }} - USE testdb; - SELECT * FROM testdb; - -- name: Display verification instructions - debug: - msg: "{{ verification_msg | split('\n') }}" diff --git a/pmm_qa/percona_server/tasks/percona-server-group-replication-setup.yml b/pmm_qa/percona_server/tasks/percona-server-group-replication-setup.yml deleted file mode 100644 index cd179239..00000000 --- a/pmm_qa/percona_server/tasks/percona-server-group-replication-setup.yml +++ /dev/null @@ -1,127 +0,0 @@ -- name: Generate my.cnf for each node - template: - src: ./data/my-group-replication.cnf.j2 - dest: "{{ data_dir }}/node{{ item }}/my.cnf" - loop: "{{ range(1, nodes_count | int + 1) | list }}" - -- name: Create initialization script for each node - template: - src: ./data/init-group-replication.sql.j2 - dest: "{{ data_dir }}/node{{ item }}/init.sql" - loop: "{{ range(1, nodes_count | int + 1) | list }}" - -- name: Start Percona Server containers with group replication - community.docker.docker_container: - name: "ps_pmm_{{ ps_version }}_{{ item }}" - image: "percona/percona-server:{{ ps_version }}" - restart_policy: always - state: started - networks: - - name: "{{ network_name }}" - env: - MYSQL_ROOT_PASSWORD: "{{ root_password }}" - ports: - - "{{ mysql_port + item - 1 }}:{{ mysql_listen_port }}" - - "{{ group_seeds_port + item - 1 }}:{{ group_seeds_port }}" - volumes: - - "{{ data_dir }}/node{{ item }}/data:/var/lib/mysql" - - "{{ data_dir }}/node{{ item }}/my.cnf:/etc/mysql/my.cnf" - - "{{ data_dir }}/node{{ item }}/init.sql:/docker-entrypoint-initdb.d/init.sql" - loop: "{{ range(1, nodes_count | int + 1) | list }}" - -- name: Wait for MySQL to be available - wait_for: - host: localhost - port: "{{ mysql_port + item - 1 }}" - delay: 10 - timeout: 300 - loop: "{{ range(1, nodes_count | int + 1) | list }}" - -- name: Reset configuration for all nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: > - mysql -uroot -p{{ root_password }} -e " - RESET BINARY LOGS AND GTIDS; - RESET REPLICA ALL; - SET GLOBAL gtid_purged=''; - " - loop: "{{ range(1, nodes_count | int + 1) | list }}" - -- name: Bootstrap first node in the cluster - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_1" - command: > - mysql -uroot -p{{ root_password }} -e " - SET GLOBAL group_replication_bootstrap_group=ON; - START GROUP_REPLICATION; - SET GLOBAL group_replication_bootstrap_group=OFF;" - retries: 5 - delay: 10 - -- name: Wait 5 seconds for bootstrap to complete - pause: - seconds: 5 - -- name: Start group replication on other nodes - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_{{ item }}" - command: mysql -uroot -p{{ root_password }} -e "START GROUP_REPLICATION;" - loop: "{{ range(2, nodes_count | int + 1) | list }}" - ignore_errors: yes - -- name: Wait 10 seconds for the other nodes to join - pause: - seconds: 10 - -- name: Create and seed a test database on primary - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_1" - command: > - mysql -uroot -p{{ root_password}} -e " - CREATE DATABASE testdb; - USE testdb; - CREATE TABLE testdb (id INT PRIMARY 
KEY, data VARCHAR(100)); - INSERT INTO testdb VALUES (1, 'Initial data from node mysql1');" - -- name: Check replication status on first node - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_1" - command: mysql -uroot -p{{ root_password }} -e "SELECT * FROM performance_schema.replication_group_members;" - register: replication_status - -- name: Display replication status - debug: - var: replication_status.stdout - -- name: Check replication group members count - community.docker.docker_container_exec: - container: "ps_pmm_{{ ps_version }}_1" - command: mysql -uroot -p{{ root_password }} -e "SELECT COUNT(*) AS count FROM performance_schema.replication_group_members;" - register: member_count - -- name: Display member count - debug: - var: member_count.stdout - -- name: Set verification instructions - set_fact: - verification_msg: | - MySQL Cluster setup complete! - - To verify replication is working: - 1. Connect to the first node: - docker exec -it ps_pmm_{{ ps_version }}_1 mysql -uroot -p{{ root_password }} - - 2. Insert data in the test database: - USE testdb; - INSERT INTO testdb VALUES (100, 'Test replication'); - - 3. Connect to other nodes and verify data is replicated: - docker exec -it ps_pmm_{{ ps_version }}_2 mysql -uroot -p{{ root_password }} - USE testdb; - SELECT * FROM testdb; - -- name: Display verification instructions - debug: - msg: "{{ verification_msg | split('\n') }}" diff --git a/pmm_qa/percona_server/tasks/percona-server-setup-single.yml b/pmm_qa/percona_server/tasks/percona-server-setup-single.yml deleted file mode 100644 index 9fbad84c..00000000 --- a/pmm_qa/percona_server/tasks/percona-server-setup-single.yml +++ /dev/null @@ -1,31 +0,0 @@ -- name: Generate my.cnf for each node - template: - src: ./data/my.cnf.j2 - dest: "{{ data_dir }}/node{{ item }}/my.cnf" - loop: "{{ range(1, nodes_count | int + 1) | list }}" - -- name: Start Percona Server containers - community.docker.docker_container: - name: "ps_pmm_{{ ps_version }}_{{ item }}" - image: "percona/percona-server:{{ ps_version }}" - restart_policy: always - state: started - networks: - - name: "{{ network_name }}" - env: - MYSQL_ROOT_PASSWORD: "{{ root_password }}" - ports: - - "{{ mysql_port + item - 1 }}:{{ mysql_listen_port }}" - - "{{ group_seeds_port + item - 1 }}:{{ group_seeds_port }}" - volumes: - - "{{ data_dir }}/node{{ item }}/my.cnf:/etc/mysql/my.cnf" - - "{{ data_dir }}/node{{ item }}/data:/var/lib/mysql" - loop: "{{ range(1, nodes_count | int + 1) | list }}" - -- name: Wait for MySQL to be available - wait_for: - host: localhost - port: "{{ mysql_port + item - 1 }}" - delay: 10 - timeout: 300 - loop: "{{ range(1, nodes_count | int + 1) | list }}" From a354cc6021d52127fab235c43c2c4ac6240d1b1a Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 15:29:47 +0100 Subject: [PATCH 06/39] PMM-13992: Fix mysql setup for 5.7 --- pmm_qa/mysql/data/my-async-replication-57.cnf.j2 | 1 - pmm_qa/mysql/data/my-group-replication-57.cnf.j2 | 1 - 2 files changed, 2 deletions(-) diff --git a/pmm_qa/mysql/data/my-async-replication-57.cnf.j2 b/pmm_qa/mysql/data/my-async-replication-57.cnf.j2 index b2f96dc5..a7e3e1d8 100644 --- a/pmm_qa/mysql/data/my-async-replication-57.cnf.j2 +++ b/pmm_qa/mysql/data/my-async-replication-57.cnf.j2 @@ -3,7 +3,6 @@ server_id={{ item }} bind-address=0.0.0.0 port={{ mysql_listen_port }} -userstat=1 # Replication settings gtid_mode=ON diff --git a/pmm_qa/mysql/data/my-group-replication-57.cnf.j2 
b/pmm_qa/mysql/data/my-group-replication-57.cnf.j2 index 74383253..84c98784 100644 --- a/pmm_qa/mysql/data/my-group-replication-57.cnf.j2 +++ b/pmm_qa/mysql/data/my-group-replication-57.cnf.j2 @@ -4,7 +4,6 @@ server_id={{ server_id_start + item - 1 }} binlog_format=ROW bind-address=0.0.0.0 port={{ mysql_listen_port }} -userstat=1 # 5.7 General replication settings gtid_mode=ON From 5f692b2a2458a4be38841dbf54919e97469d7892 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 16:57:46 +0100 Subject: [PATCH 07/39] PMM-13992: Fix mysql setup gr for 8.4 --- pmm_qa/mysql/data/my-group-replication.cnf.j2 | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/pmm_qa/mysql/data/my-group-replication.cnf.j2 b/pmm_qa/mysql/data/my-group-replication.cnf.j2 index 473e80e3..9c0f4076 100644 --- a/pmm_qa/mysql/data/my-group-replication.cnf.j2 +++ b/pmm_qa/mysql/data/my-group-replication.cnf.j2 @@ -8,12 +8,10 @@ port={{ mysql_listen_port }} gtid_mode=ON enforce_gtid_consistency=ON binlog_checksum=NONE -binlog_format=ROW -transaction_write_set_extraction=XXHASH64 log_bin=binlog log_replica_updates=ON disabled_storage_engines="MyISAM,BLACKHOLE,FEDERATED,ARCHIVE,MEMORY" -lower_case_table_names=2 +lower_case_table_names=2 # MacOS-specific, but also good generally # MySQL 8.4 compatibility settings report_host={{ container_prefix }}{{ item }} @@ -32,7 +30,6 @@ loose-group_replication_single_primary_mode=ON loose-group_replication_enforce_update_everywhere_checks=OFF # Recovery settings -loose-group_replication_recovery_use_ssl=ON # Add this for secure recovery loose-group_replication_recovery_get_public_key=ON loose-group_replication_recovery_retry_count=10 loose-group_replication_recovery_reconnect_interval=60 @@ -45,8 +42,3 @@ relay_log_purge=ON # Performance and connection settings max_connections=1000 innodb_buffer_pool_size=256M - -# Additional recommended settings -loose-group_replication_compression_threshold=1000000 # Compress messages > 1MB -loose-group_replication_member_expel_timeout=5 # Seconds before expelling unresponsive member -loose-group_replication_autorejoin_tries=3 # Auto-rejoin attempts after expulsion From c4a366a8cde2a1833cda48df8db9f1d8bb59ff1c Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 17:01:10 +0100 Subject: [PATCH 08/39] PMM-13992: Fix naming --- pmm_qa/mysql/tasks/prepare_install_mysql.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pmm_qa/mysql/tasks/prepare_install_mysql.yml b/pmm_qa/mysql/tasks/prepare_install_mysql.yml index be24a9c6..96366689 100644 --- a/pmm_qa/mysql/tasks/prepare_install_mysql.yml +++ b/pmm_qa/mysql/tasks/prepare_install_mysql.yml @@ -107,7 +107,8 @@ shell: | docker exec {{ container_prefix }}{{ item }} mkdir -p /etc/mysql docker cp {{ data_dir }}/node{{ index }}/my.cnf {{ container_prefix }}{{ item }}:/etc/mysql/my.cnf -- name: Restart Percona Server for MySQL + +- name: Restart MySQL shell: docker exec {{ container_prefix }}{{ index }} systemctl restart mysql - name: Wait 5 seconds for MySQL to start From fcf8cdcf4d5326216c036d969d05661492865034 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 17:17:03 +0100 Subject: [PATCH 09/39] PMM-13992: Add innodb compression to mysql --- pmm_qa/data/mysql_load.sql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 8992e400..a98191a0 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -7,13 +7,13 @@ CREATE 
TABLE students ( first_name VARCHAR(50), last_name VARCHAR(50), birth_date DATE -); +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; CREATE TABLE classes ( class_id INT AUTO_INCREMENT PRIMARY KEY, name VARCHAR(100), teacher VARCHAR(100) -); +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; CREATE TABLE enrollments ( enrollment_id INT AUTO_INCREMENT PRIMARY KEY, @@ -22,7 +22,7 @@ CREATE TABLE enrollments ( enrollment_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (student_id) REFERENCES students(student_id), FOREIGN KEY (class_id) REFERENCES classes(class_id) -); +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; -- ======================================== -- INSERT INITIAL DATA From 3eb04b21aff5463d9df3454601939f5d59507b2b Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 17:29:48 +0100 Subject: [PATCH 10/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 132 +++++++++++++------------------------ 1 file changed, 45 insertions(+), 87 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index a98191a0..4ae7e8cc 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -1,94 +1,52 @@ -- ======================================== --- CREATE TABLES --- ======================================== - -CREATE TABLE students ( - student_id INT AUTO_INCREMENT PRIMARY KEY, - first_name VARCHAR(50), - last_name VARCHAR(50), - birth_date DATE -) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; - -CREATE TABLE classes ( - class_id INT AUTO_INCREMENT PRIMARY KEY, - name VARCHAR(100), - teacher VARCHAR(100) -) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; - -CREATE TABLE enrollments ( - enrollment_id INT AUTO_INCREMENT PRIMARY KEY, - student_id INT, - class_id INT, - enrollment_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - FOREIGN KEY (student_id) REFERENCES students(student_id), - FOREIGN KEY (class_id) REFERENCES classes(class_id) -) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; - --- ======================================== --- INSERT INITIAL DATA --- ======================================== - -INSERT INTO students (first_name, last_name, birth_date) VALUES -('Alice', 'Smith', '2005-04-10'), -('Bob', 'Johnson', '2006-08-15'), -('Charlie', 'Brown', '2004-12-01'); - -INSERT INTO classes (name, teacher) VALUES -('Mathematics', 'Mrs. Taylor'), -('History', 'Mr. Anderson'), -('Science', 'Dr. 
Reynolds'); - -INSERT INTO enrollments (student_id, class_id) VALUES -(1, 1), -(1, 2), -(2, 2), -(3, 1), -(3, 3); - --- ======================================== --- SELECT: View all data after insert --- ======================================== - --- View all students -SELECT * FROM students; - --- View all classes -SELECT * FROM classes; - --- View all enrollments -SELECT * FROM enrollments; - --- View students enrolled in Mathematics -SELECT s.first_name, s.last_name -FROM students s -JOIN enrollments e ON s.student_id = e.student_id -JOIN classes c ON e.class_id = c.class_id -WHERE c.name = 'Mathematics'; - --- Count students per class -SELECT c.name AS class_name, COUNT(e.student_id) AS student_count -FROM classes c -LEFT JOIN enrollments e ON c.class_id = e.class_id -GROUP BY c.name; - --- ======================================== --- UPDATE DATA --- ======================================== - +-- "COMPRESSION METRIC BOOSTER" SECTION +-- Add heavy bulk-inserts and updates to trigger compression ops/timings +-- ======================================== + +-- Add more students with large fields to fill compressed pages +INSERT INTO students (first_name, last_name, birth_date) +SELECT CONCAT('TestFirst', n), REPEAT('LongSurname', 10), '2000-01-01' +FROM ( + SELECT @row := @row + 1 AS n FROM + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, + (SELECT @row := 0) r + LIMIT 1000 +) numbers; + +-- Add classes with big teacher names +INSERT INTO classes (name, teacher) +SELECT CONCAT('Class', n), REPEAT('TeacherLongName', 10) +FROM ( + SELECT @row := @row + 1 AS n FROM + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, + (SELECT @row := 0) r + LIMIT 100 +) numbers; + +-- Create a large number of enrollments randomly +INSERT INTO enrollments (student_id, class_id) +SELECT FLOOR(1 + (RAND() * 1000)), FLOOR(1 + (RAND() * 100)) +FROM (SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 UNION ALL SELECT 10) t1, + (SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 UNION ALL SELECT 10) t2; + +-- Additional updates to trigger further compression activity UPDATE students -SET last_name = 'Williams' -WHERE first_name = 'Bob' AND last_name = 'Johnson'; +SET last_name = REPEAT('Surname', 15) +WHERE student_id <= 500; UPDATE classes -SET teacher = 'Ms. 
Carter' -WHERE name = 'History'; - --- ======================================== --- DELETE DATA --- ======================================== +SET teacher = REPEAT('DrLongTeacherSurname', 8) +WHERE class_id <= 50; +-- Optionally, delete some records to cause page reorganization/compression DELETE FROM enrollments -WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'); +WHERE enrollment_id % 7 = 0; + +-- Optionally, compress fragmented pages further +OPTIMIZE TABLE students; +OPTIMIZE TABLE classes; +OPTIMIZE TABLE enrollments; -DELETE FROM students -WHERE first_name = 'Alice' AND last_name = 'Smith'; +-- ======================================== +-- End of booster section +-- ======================================== \ No newline at end of file From 7f5def6f742cbec7f8bd00b66d37b5ac439c2fa5 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 17:48:34 +0100 Subject: [PATCH 11/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 66 ++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 35 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 4ae7e8cc..6e38b2a6 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -1,52 +1,48 @@ -- ======================================== --- "COMPRESSION METRIC BOOSTER" SECTION --- Add heavy bulk-inserts and updates to trigger compression ops/timings +-- "HEAVY COMPRESSION METRIC BOOSTER" SECTION +-- Intensely stimulate compression & CPU load -- ======================================== --- Add more students with large fields to fill compressed pages +-- Add a much larger number of students (10,000) INSERT INTO students (first_name, last_name, birth_date) -SELECT CONCAT('TestFirst', n), REPEAT('LongSurname', 10), '2000-01-01' +SELECT CONCAT('TestStudent', n), REPEAT('VeryLongSurname', 100), '2000-01-01' FROM ( - SELECT @row := @row + 1 AS n FROM + SELECT @row := @row + 1 AS n FROM (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t2, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t3, (SELECT @row := 0) r - LIMIT 1000 -) numbers; + LIMIT 10000 +) big_numbers; --- Add classes with big teacher names +-- Add many classes with big teacher names (1,000) INSERT INTO classes (name, teacher) -SELECT CONCAT('Class', n), REPEAT('TeacherLongName', 10) +SELECT CONCAT('Class', n), REPEAT('TeacherLongNameExtra', 50) FROM ( - SELECT @row := @row + 1 AS n FROM + SELECT @row := @row + 1 AS n FROM (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t2, (SELECT @row := 0) r - LIMIT 100 + LIMIT 1000 ) numbers; --- Create a large number of enrollments randomly +-- Create 100,000 random enrollments INSERT INTO enrollments (student_id, class_id) -SELECT FLOOR(1 + (RAND() * 
1000)), FLOOR(1 + (RAND() * 100)) -FROM (SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 UNION ALL SELECT 10) t1, - (SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 UNION ALL SELECT 10) t2; - --- Additional updates to trigger further compression activity +SELECT FLOOR(1 + (RAND() * 10000)), FLOOR(1 + (RAND() * 1000)) +FROM ( + SELECT @n := @n + 1 FROM + (SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 UNION ALL SELECT 10) t1, + (SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 UNION ALL SELECT 10) t2, + (SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 UNION ALL SELECT 10) t3, + (SELECT @n := 0) r + LIMIT 100000 +) massive_enrollments; + +-- Heavy updates: repeatedly update many large records UPDATE students -SET last_name = REPEAT('Surname', 15) -WHERE student_id <= 500; - -UPDATE classes -SET teacher = REPEAT('DrLongTeacherSurname', 8) -WHERE class_id <= 50; +SET last_name = REPEAT('CPUSurnameOverload', 100) +WHERE student_id % 3 = 0; --- Optionally, delete some records to cause page reorganization/compression -DELETE FROM enrollments -WHERE enrollment_id % 7 = 0; - --- Optionally, compress fragmented pages further -OPTIMIZE TABLE students; -OPTIMIZE TABLE classes; -OPTIMIZE TABLE enrollments; - --- ======================================== --- End of booster section --- ======================================== \ No newline at end of file +UPDATE students +SET last_name = REPEAT('Another* + \ No newline at end of file From cfc8a9e5dcc3b4e73b691961fa9d23bb49dece41 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 17:56:21 +0100 Subject: [PATCH 12/39] PMM-13992: Bigger test data --- pmm_qa/mysql/mysql-setup.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pmm_qa/mysql/mysql-setup.yml b/pmm_qa/mysql/mysql-setup.yml index c9a51069..42d9aa51 100644 --- a/pmm_qa/mysql/mysql-setup.yml +++ b/pmm_qa/mysql/mysql-setup.yml @@ -134,12 +134,12 @@ - name: Prepare data for sysbench inside of all mysql nodes shell: | - docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 prepare + docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --mysql-table-engine="InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8" prepare when: setup_type != "gr" and setup_type != "replication" loop: "{{ range(1, nodes_count | int + 1) | list }}" - name: Prepare data for sysbench inside of first mysql nodes - shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 prepare + shell: docker exec {{ container_prefix }}1 
sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --mysql-table-engine="InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8" --table-size=100000 prepare when: setup_type == "gr" or setup_type == "replication" - name: Run load for sysbench inside of all mysql nodes From cdba5b7d91e61a7b1d3695a6504a4e1f4e4879bb Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 17:57:47 +0100 Subject: [PATCH 13/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 126 ++++++++++++++++++++++++++++++++++++- 1 file changed, 124 insertions(+), 2 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 6e38b2a6..5dbb3dd2 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -1,3 +1,98 @@ +-- ======================================== +-- CREATE TABLES +-- ======================================== + +CREATE TABLE students ( + student_id INT AUTO_INCREMENT PRIMARY KEY, + first_name VARCHAR(50), + last_name VARCHAR(50), + birth_date DATE +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; + +CREATE TABLE classes ( + class_id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(100), + teacher VARCHAR(100) +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; + +CREATE TABLE enrollments ( + enrollment_id INT AUTO_INCREMENT PRIMARY KEY, + student_id INT, + class_id INT, + enrollment_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (student_id) REFERENCES students(student_id), + FOREIGN KEY (class_id) REFERENCES classes(class_id) +); + +-- ======================================== +-- INSERT INITIAL DATA +-- ======================================== + +INSERT INTO students (first_name, last_name, birth_date) VALUES +('Alice', 'Smith', '2005-04-10'), +('Bob', 'Johnson', '2006-08-15'), +('Charlie', 'Brown', '2004-12-01'); + +INSERT INTO classes (name, teacher) VALUES +('Mathematics', 'Mrs. Taylor'), +('History', 'Mr. Anderson'), +('Science', 'Dr. Reynolds'); + +INSERT INTO enrollments (student_id, class_id) VALUES +(1, 1), +(1, 2), +(2, 2), +(3, 1), +(3, 3); + +-- ======================================== +-- SELECT: View all data after insert +-- ======================================== + +-- View all students +SELECT * FROM students; + +-- View all classes +SELECT * FROM classes; + +-- View all enrollments +SELECT * FROM enrollments; + +-- View students enrolled in Mathematics +SELECT s.first_name, s.last_name +FROM students s +JOIN enrollments e ON s.student_id = e.student_id +JOIN classes c ON e.class_id = c.class_id +WHERE c.name = 'Mathematics'; + +-- Count students per class +SELECT c.name AS class_name, COUNT(e.student_id) AS student_count +FROM classes c +LEFT JOIN enrollments e ON c.class_id = e.class_id +GROUP BY c.name; + +-- ======================================== +-- UPDATE DATA +-- ======================================== + +UPDATE students +SET last_name = 'Williams' +WHERE first_name = 'Bob' AND last_name = 'Johnson'; + +UPDATE classes +SET teacher = 'Ms. 
Carter' +WHERE name = 'History'; + +-- ======================================== +-- DELETE DATA +-- ======================================== + +DELETE FROM enrollments +WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'); + +DELETE FROM students +WHERE first_name = 'Alice' AND last_name = 'Smith'; + -- ======================================== -- "HEAVY COMPRESSION METRIC BOOSTER" SECTION -- Intensely stimulate compression & CPU load @@ -44,5 +139,32 @@ SET last_name = REPEAT('CPUSurnameOverload', 100) WHERE student_id % 3 = 0; UPDATE students -SET last_name = REPEAT('Another* - \ No newline at end of file +SET last_name = REPEAT('AnotherSurnamePattern', 120) +WHERE student_id % 7 = 0; + +-- Massive class teacher name updates +UPDATE classes +SET teacher = REPEAT('VeryCPUIntensiveTeacher', 60) +WHERE class_id % 2 = 0; + +UPDATE classes +SET teacher = REPEAT('XtremeTeacher', 80) +WHERE class_id % 3 = 0; + +-- Heavy delete cycles to fragment and reorganize pages +DELETE FROM enrollments WHERE enrollment_id % 17 = 0; +DELETE FROM enrollments WHERE enrollment_id % 23 = 0; + +-- Force additional storage engine work (optional, can be slow!!) +OPTIMIZE TABLE students; +OPTIMIZE TABLE classes; +OPTIMIZE TABLE enrollments; + +-- Optional: Table scan SUMs to burn more CPU +SELECT SUM(CHAR_LENGTH(first_name) + CHAR_LENGTH(last_name)) FROM students; +SELECT SUM(CHAR_LENGTH(name) + CHAR_LENGTH(teacher)) FROM classes; +SELECT COUNT(*) FROM enrollments; + +-- ======================================== +-- End of heavy booster section +-- ======================================== \ No newline at end of file From c3d3c6e53a446db899628dac6cac725093769762 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 18:00:28 +0100 Subject: [PATCH 14/39] PMM-13992: Bigger test data --- pmm_qa/mysql/mysql-setup.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pmm_qa/mysql/mysql-setup.yml b/pmm_qa/mysql/mysql-setup.yml index 42d9aa51..c9a51069 100644 --- a/pmm_qa/mysql/mysql-setup.yml +++ b/pmm_qa/mysql/mysql-setup.yml @@ -134,12 +134,12 @@ - name: Prepare data for sysbench inside of all mysql nodes shell: | - docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --mysql-table-engine="InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8" prepare + docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 prepare when: setup_type != "gr" and setup_type != "replication" loop: "{{ range(1, nodes_count | int + 1) | list }}" - name: Prepare data for sysbench inside of first mysql nodes - shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --mysql-table-engine="InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8" --table-size=100000 prepare + shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 prepare when: setup_type == "gr" or setup_type == "replication" - name: Run load for 
sysbench inside of all mysql nodes From 3a4b6ea0df4422513b5c5bdba88b35e803cebc73 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 18:18:37 +0100 Subject: [PATCH 15/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 5dbb3dd2..ef9a2b74 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -4,8 +4,8 @@ CREATE TABLE students ( student_id INT AUTO_INCREMENT PRIMARY KEY, - first_name VARCHAR(50), - last_name VARCHAR(50), + first_name VARCHAR(250), + last_name VARCHAR(250), birth_date DATE ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; From 349ea5724efdbc9d2ca9d16b36568510f8c9b9f0 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 18:23:34 +0100 Subject: [PATCH 16/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index ef9a2b74..db32ec4a 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -5,7 +5,7 @@ CREATE TABLE students ( student_id INT AUTO_INCREMENT PRIMARY KEY, first_name VARCHAR(250), - last_name VARCHAR(250), + last_name VARCHAR(1000), birth_date DATE ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; From b6c080989816c807fe3b8d86bc3696e4a7191247 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 18:35:38 +0100 Subject: [PATCH 17/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index db32ec4a..2d546c90 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -5,7 +5,7 @@ CREATE TABLE students ( student_id INT AUTO_INCREMENT PRIMARY KEY, first_name VARCHAR(250), - last_name VARCHAR(1000), + last_name VARCHAR(2000), birth_date DATE ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; From 221e429c54a381321c6e5763135b6408ce141c0b Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 18:40:26 +0100 Subject: [PATCH 18/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 2d546c90..f231f27e 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -12,7 +12,7 @@ CREATE TABLE students ( CREATE TABLE classes ( class_id INT AUTO_INCREMENT PRIMARY KEY, name VARCHAR(100), - teacher VARCHAR(100) + teacher VARCHAR(2000) ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; CREATE TABLE enrollments ( From 707deaa9261df9a37105b7a88dc21b4fcde0c2f7 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 18:45:49 +0100 Subject: [PATCH 19/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index f231f27e..da55ffb3 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -123,7 +123,15 @@ FROM ( -- Create 100,000 random enrollments INSERT INTO enrollments (student_id, class_id) -SELECT FLOOR(1 + (RAND() * 10000)), FLOOR(1 + (RAND() * 1000)) +SELECT s.student_id, c.class_id +FROM ( + SELECT student_id FROM students ORDER BY RAND() LIMIT 100000 +) s +JOIN ( + SELECT class_id FROM classes ORDER BY RAND() LIMIT 1000 +) c +ON 1=1 
+LIMIT 100000; FROM ( SELECT @n := @n + 1 FROM (SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 UNION ALL SELECT 10) t1, From 092739719e93dc27bb5aee79900cdbf658ab7afe Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 18:51:36 +0100 Subject: [PATCH 20/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 190 ++++++++++++++----------------------- 1 file changed, 72 insertions(+), 118 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index da55ffb3..70fa03b6 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -1,18 +1,24 @@ -- ======================================== --- CREATE TABLES +-- CREATE DB AND TABLES -- ======================================== +CREATE DATABASE IF NOT EXISTS school; +USE school; + +DROP TABLE IF EXISTS enrollments; +DROP TABLE IF EXISTS students; +DROP TABLE IF EXISTS classes; CREATE TABLE students ( student_id INT AUTO_INCREMENT PRIMARY KEY, - first_name VARCHAR(250), - last_name VARCHAR(2000), + first_name TEXT, + last_name TEXT, birth_date DATE ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; CREATE TABLE classes ( class_id INT AUTO_INCREMENT PRIMARY KEY, - name VARCHAR(100), - teacher VARCHAR(2000) + name TEXT, + teacher TEXT ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; CREATE TABLE enrollments ( @@ -22,157 +28,105 @@ CREATE TABLE enrollments ( enrollment_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (student_id) REFERENCES students(student_id), FOREIGN KEY (class_id) REFERENCES classes(class_id) -); +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; -- ======================================== -- INSERT INITIAL DATA -- ======================================== -INSERT INTO students (first_name, last_name, birth_date) VALUES -('Alice', 'Smith', '2005-04-10'), -('Bob', 'Johnson', '2006-08-15'), -('Charlie', 'Brown', '2004-12-01'); - -INSERT INTO classes (name, teacher) VALUES -('Mathematics', 'Mrs. Taylor'), -('History', 'Mr. Anderson'), -('Science', 'Dr. 
Reynolds'); +-- Insert 10,000 students with big names +INSERT INTO students (first_name, last_name, birth_date) +SELECT CONCAT('Student', n), + REPEAT('Surname', 80), + DATE_ADD('2000-01-01', INTERVAL RAND()*8000 DAY) +FROM ( + SELECT @n := @n + 1 AS n FROM + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL + SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL + SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t2, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL + SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t3, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL + SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t4, + (SELECT @n := 0) r + LIMIT 10000 +) numbers; -INSERT INTO enrollments (student_id, class_id) VALUES -(1, 1), -(1, 2), -(2, 2), -(3, 1), -(3, 3); +-- Insert 1000 classes with large teacher names +INSERT INTO classes (name, teacher) +SELECT CONCAT('Class', n), + REPEAT('TeacherLongName', 80) +FROM ( + SELECT @n2 := @n2 + 1 AS n FROM + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL + SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL + SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t2, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL + SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t3, + (SELECT @n2 := 0) r + LIMIT 1000 +) numbers; -- ======================================== --- SELECT: View all data after insert +-- CREATE A TEMPORARY HELPER TABLE FOR 100k ROWS -- ======================================== - --- View all students -SELECT * FROM students; - --- View all classes -SELECT * FROM classes; - --- View all enrollments -SELECT * FROM enrollments; - --- View students enrolled in Mathematics -SELECT s.first_name, s.last_name -FROM students s -JOIN enrollments e ON s.student_id = e.student_id -JOIN classes c ON e.class_id = c.class_id -WHERE c.name = 'Mathematics'; - --- Count students per class -SELECT c.name AS class_name, COUNT(e.student_id) AS student_count -FROM classes c -LEFT JOIN enrollments e ON c.class_id = e.class_id -GROUP BY c.name; +DROP TEMPORARY TABLE IF EXISTS counter; +CREATE TEMPORARY TABLE counter (n INT PRIMARY KEY AUTO_INCREMENT) ENGINE=Memory; +INSERT INTO counter VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL); +INSERT INTO counter(n) SELECT NULL FROM counter; -- 10*10 = 100 +INSERT INTO counter(n) SELECT NULL FROM counter; -- 100*10 = 1,000 +INSERT INTO counter(n) SELECT NULL FROM counter; -- 1,000*10 = 10,000 +INSERT INTO counter(n) SELECT NULL FROM counter; -- 10,000*10 = 100,000 -- ======================================== --- UPDATE DATA +-- BULK INSERT ENROLLMENTS (100,000 rows, all valid FKs) -- ======================================== +INSERT INTO enrollments (student_id, class_id) +SELECT + (SELECT student_id FROM students ORDER BY RAND() LIMIT 1), + (SELECT class_id FROM classes ORDER BY RAND() LIMIT 1) +FROM counter 
+LIMIT 100000; -UPDATE students -SET last_name = 'Williams' -WHERE first_name = 'Bob' AND last_name = 'Johnson'; - -UPDATE classes -SET teacher = 'Ms. Carter' -WHERE name = 'History'; - --- ======================================== --- DELETE DATA --- ======================================== - -DELETE FROM enrollments -WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'); - -DELETE FROM students -WHERE first_name = 'Alice' AND last_name = 'Smith'; +DROP TEMPORARY TABLE counter; -- ======================================== --- "HEAVY COMPRESSION METRIC BOOSTER" SECTION --- Intensely stimulate compression & CPU load +-- HEAVY UPDATES, DELETES, OPTIMIZE FOR CPU/COMPRESSION & PAGE CHANGE -- ======================================== --- Add a much larger number of students (10,000) -INSERT INTO students (first_name, last_name, birth_date) -SELECT CONCAT('TestStudent', n), REPEAT('VeryLongSurname', 100), '2000-01-01' -FROM ( - SELECT @row := @row + 1 AS n FROM - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t2, - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t3, - (SELECT @row := 0) r - LIMIT 10000 -) big_numbers; - --- Add many classes with big teacher names (1,000) -INSERT INTO classes (name, teacher) -SELECT CONCAT('Class', n), REPEAT('TeacherLongNameExtra', 50) -FROM ( - SELECT @row := @row + 1 AS n FROM - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t2, - (SELECT @row := 0) r - LIMIT 1000 -) numbers; - --- Create 100,000 random enrollments -INSERT INTO enrollments (student_id, class_id) -SELECT s.student_id, c.class_id -FROM ( - SELECT student_id FROM students ORDER BY RAND() LIMIT 100000 -) s -JOIN ( - SELECT class_id FROM classes ORDER BY RAND() LIMIT 1000 -) c -ON 1=1 -LIMIT 100000; -FROM ( - SELECT @n := @n + 1 FROM - (SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 UNION ALL SELECT 10) t1, - (SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 UNION ALL SELECT 10) t2, - (SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 UNION ALL SELECT 10) t3, - (SELECT @n := 0) r - LIMIT 100000 -) massive_enrollments; - --- Heavy updates: repeatedly update many large records +-- Bulk updates to make lots of compression work UPDATE students -SET last_name = REPEAT('CPUSurnameOverload', 100) +SET last_name = REPEAT('CPUSurnameOverload', 80) WHERE student_id % 3 = 0; UPDATE students -SET last_name = REPEAT('AnotherSurnamePattern', 120) -WHERE student_id % 7 = 
0; +SET last_name = REPEAT('AnotherSurnamePattern', 80) +WHERE student_id % 5 = 0; --- Massive class teacher name updates UPDATE classes -SET teacher = REPEAT('VeryCPUIntensiveTeacher', 60) +SET teacher = REPEAT('VeryCPUIntensiveTeacher', 40) WHERE class_id % 2 = 0; UPDATE classes -SET teacher = REPEAT('XtremeTeacher', 80) +SET teacher = REPEAT('XtremeTeacher', 120) WHERE class_id % 3 = 0; --- Heavy delete cycles to fragment and reorganize pages +-- Bulk delete for page re-org DELETE FROM enrollments WHERE enrollment_id % 17 = 0; DELETE FROM enrollments WHERE enrollment_id % 23 = 0; --- Force additional storage engine work (optional, can be slow!!) +-- Force flush/defrag pages (can be slow!) OPTIMIZE TABLE students; OPTIMIZE TABLE classes; OPTIMIZE TABLE enrollments; --- Optional: Table scan SUMs to burn more CPU +-- (Optional: Table scan SUMs to burn more CPU) SELECT SUM(CHAR_LENGTH(first_name) + CHAR_LENGTH(last_name)) FROM students; SELECT SUM(CHAR_LENGTH(name) + CHAR_LENGTH(teacher)) FROM classes; SELECT COUNT(*) FROM enrollments; -- ======================================== --- End of heavy booster section +-- END OF SCRIPT -- ======================================== \ No newline at end of file From d4c8c5368d6a6f73ace964302df5461cb05025c9 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 18:58:15 +0100 Subject: [PATCH 21/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 70fa03b6..04a3c2f6 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -69,16 +69,16 @@ FROM ( LIMIT 1000 ) numbers; --- ======================================== --- CREATE A TEMPORARY HELPER TABLE FOR 100k ROWS --- ======================================== DROP TEMPORARY TABLE IF EXISTS counter; -CREATE TEMPORARY TABLE counter (n INT PRIMARY KEY AUTO_INCREMENT) ENGINE=Memory; -INSERT INTO counter VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL); -INSERT INTO counter(n) SELECT NULL FROM counter; -- 10*10 = 100 -INSERT INTO counter(n) SELECT NULL FROM counter; -- 100*10 = 1,000 -INSERT INTO counter(n) SELECT NULL FROM counter; -- 1,000*10 = 10,000 -INSERT INTO counter(n) SELECT NULL FROM counter; -- 10,000*10 = 100,000 +CREATE TEMPORARY TABLE counter (n INT PRIMARY KEY); + +WITH RECURSIVE seq AS ( + SELECT 1 AS n + UNION ALL + SELECT n + 1 FROM seq WHERE n < 100000 +) +INSERT INTO counter (n) +SELECT n FROM seq; -- ======================================== -- BULK INSERT ENROLLMENTS (100,000 rows, all valid FKs) From 0d1ee57973540aadbe810f5279dafe22f96a617b Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 19:04:45 +0100 Subject: [PATCH 22/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 1 - 1 file changed, 1 deletion(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 04a3c2f6..9c15b791 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -78,7 +78,6 @@ WITH RECURSIVE seq AS ( SELECT n + 1 FROM seq WHERE n < 100000 ) INSERT INTO counter (n) -SELECT n FROM seq; -- ======================================== -- BULK INSERT ENROLLMENTS (100,000 rows, all valid FKs) From 1534d9022e693c50145a9648e671ef7a189a7614 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 19:09:37 +0100 Subject: [PATCH 23/39] PMM-13992: Bigger test data --- pmm_qa/mysql/mysql-setup.yml | 8 ++++---- 1 file changed, 4 
insertions(+), 4 deletions(-) diff --git a/pmm_qa/mysql/mysql-setup.yml b/pmm_qa/mysql/mysql-setup.yml index c9a51069..11700bf0 100644 --- a/pmm_qa/mysql/mysql-setup.yml +++ b/pmm_qa/mysql/mysql-setup.yml @@ -134,21 +134,21 @@ - name: Prepare data for sysbench inside of all mysql nodes shell: | - docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 prepare + docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --mysql-table-engine=InnoDB prepare when: setup_type != "gr" and setup_type != "replication" loop: "{{ range(1, nodes_count | int + 1) | list }}" - name: Prepare data for sysbench inside of first mysql nodes - shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 prepare + shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --mysql-table-engine=InnoDB prepare when: setup_type == "gr" or setup_type == "replication" - name: Run load for sysbench inside of all mysql nodes - shell: docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --threads=16 --time=60 run + shell: docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --threads=16 --time=60 --mysql-table-engine=InnoDB run loop: "{{ range(1, nodes_count | int + 1) | list }}" when: setup_type != "gr" and setup_type != "replication" - name: Run load for sysbench inside of primary mysql node - shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --threads=16 --time=60 run + shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --threads=16 --time=60 --mysql-table-engine=InnoDB run when: setup_type == "gr" and setup_type == "replication" - name: Copy a load file into the container From 82925c5266c753a6a656c0a2d6e881998ec65643 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 19:13:23 +0100 Subject: [PATCH 24/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 326 +++++++++++++++++++++++-------------- 1 file changed, 207 insertions(+), 119 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 9c15b791..8107f3bc 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -1,131 +1,219 @@ --- ======================================== --- CREATE DB AND TABLES --- 
======================================== -CREATE DATABASE IF NOT EXISTS school; -USE school; - -DROP TABLE IF EXISTS enrollments; -DROP TABLE IF EXISTS students; -DROP TABLE IF EXISTS classes; - -CREATE TABLE students ( - student_id INT AUTO_INCREMENT PRIMARY KEY, - first_name TEXT, - last_name TEXT, - birth_date DATE +-- ========================================================= +-- InnoDB Compression Stress & Metrics Script (MySQL 8.4+) +-- ========================================================= +-- Adjust these first if desired +SET @rows_per_table := 100000; -- Base row count (increase for more stress) +SET @update_touch_fraction := 0.30; -- Fraction of rows to update (approx) +SET @delete_fraction := 0.10; -- Fraction of rows to delete (approx) + +-- Raise recursion depth for large CTE generation +SET SESSION cte_max_recursion_depth = 200000; + +-- Drop & recreate schema +DROP DATABASE IF EXISTS innodb_compress_lab; +CREATE DATABASE innodb_compress_lab; +USE innodb_compress_lab; + +-- Optional: ensure per-table tablespace (generally default ON now) +-- SHOW VARIABLES LIKE 'innodb_file_per_table'; + +-- ========================================================= +-- Helper CTE for generating N numbers (1..@rows_per_table) +-- ========================================================= +WITH RECURSIVE seq AS ( + SELECT 1 AS n + UNION ALL + SELECT n + 1 FROM seq WHERE n < @rows_per_table +) +SELECT COUNT(*) AS generated_rows INTO @generated_rows +FROM seq; +-- At this point seq can be re-used inside each INSERT ... SELECT (we'll redefine per table). + +-- ========================================================= +-- TABLE CREATION (different KEY_BLOCK_SIZE values) +-- ========================================================= +-- Highly compressible: repeated patterns +CREATE TABLE t_comp_2 ( + id INT PRIMARY KEY AUTO_INCREMENT, + compressible TEXT, + semi_random TEXT +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; + +CREATE TABLE t_comp_4 ( + id INT PRIMARY KEY AUTO_INCREMENT, + compressible TEXT, + semi_random TEXT +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; + +CREATE TABLE t_comp_8 ( + id INT PRIMARY KEY AUTO_INCREMENT, + compressible TEXT, + semi_random TEXT ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; -CREATE TABLE classes ( - class_id INT AUTO_INCREMENT PRIMARY KEY, - name TEXT, - teacher TEXT +CREATE TABLE t_comp_16 ( + id INT PRIMARY KEY AUTO_INCREMENT, + compressible TEXT, + semi_random TEXT +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; + +-- A mixed workload table (different patterns) reusing KEY_BLOCK_SIZE=8 +CREATE TABLE t_mixed_8 ( + id INT PRIMARY KEY AUTO_INCREMENT, + pattern_a TEXT, + pattern_b TEXT, + pattern_c TEXT ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; -CREATE TABLE enrollments ( - enrollment_id INT AUTO_INCREMENT PRIMARY KEY, - student_id INT, - class_id INT, - enrollment_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - FOREIGN KEY (student_id) REFERENCES students(student_id), - FOREIGN KEY (class_id) REFERENCES classes(class_id) -) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +-- ========================================================= +-- BEFORE METRICS SNAPSHOT +-- ========================================================= +SELECT 'BEFORE' AS phase, * FROM information_schema.innodb_cmp ORDER BY page_size; + +-- ========================================================= +-- DATA LOAD SECTION +-- Each table: insert @rows_per_table rows with varied compressibility +-- 
========================================================= --- ======================================== --- INSERT INITIAL DATA --- ======================================== - --- Insert 10,000 students with big names -INSERT INTO students (first_name, last_name, birth_date) -SELECT CONCAT('Student', n), - REPEAT('Surname', 80), - DATE_ADD('2000-01-01', INTERVAL RAND()*8000 DAY) -FROM ( - SELECT @n := @n + 1 AS n FROM - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL - SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL - SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t2, - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL - SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t3, - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL - SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t4, - (SELECT @n := 0) r - LIMIT 10000 -) numbers; - --- Insert 1000 classes with large teacher names -INSERT INTO classes (name, teacher) -SELECT CONCAT('Class', n), - REPEAT('TeacherLongName', 80) -FROM ( - SELECT @n2 := @n2 + 1 AS n FROM - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL - SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL - SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t2, - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 UNION ALL - SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t3, - (SELECT @n2 := 0) r - LIMIT 1000 -) numbers; - -DROP TEMPORARY TABLE IF EXISTS counter; -CREATE TEMPORARY TABLE counter (n INT PRIMARY KEY); +-- Utility function via inline expressions: +-- compressible: REPEAT('A', 5000) + REPEAT('B', 5000) etc. 
+-- semi_random: CONCAT of pseudo-random fragments using MD5(RAND()) +-- t_comp_2 WITH RECURSIVE seq AS ( SELECT 1 AS n UNION ALL - SELECT n + 1 FROM seq WHERE n < 100000 + SELECT n + 1 FROM seq WHERE n < @rows_per_table ) -INSERT INTO counter (n) +INSERT INTO t_comp_2 (compressible, semi_random) +SELECT + CONCAT(REPEAT('A', 4000), REPEAT('B', 4000), REPEAT('C', 2000)), + CONCAT(MD5(RAND()), MD5(RAND()), MD5(RAND())) +FROM seq; --- ======================================== --- BULK INSERT ENROLLMENTS (100,000 rows, all valid FKs) --- ======================================== -INSERT INTO enrollments (student_id, class_id) +-- t_comp_4 +WITH RECURSIVE seq AS ( + SELECT 1 AS n + UNION ALL + SELECT n + 1 FROM seq WHERE n < @rows_per_table +) +INSERT INTO t_comp_4 (compressible, semi_random) +SELECT + CONCAT(REPEAT('X', 3000), REPEAT('Y', 3000), REPEAT('Z', 4000)), + CONCAT(MD5(RAND()), ':', MD5(RAND()), ':', MD5(RAND())) +FROM seq; + +-- t_comp_8 +WITH RECURSIVE seq AS ( + SELECT 1 AS n + UNION ALL + SELECT n + 1 FROM seq WHERE n < @rows_per_table +) +INSERT INTO t_comp_8 (compressible, semi_random) +SELECT + REPEAT('LONGPATTERN1234567890', 600), -- ~12000 chars compressible + CONCAT(MD5(RAND()), MD5(RAND()), MD5(RAND()), MD5(RAND())) +FROM seq; + +-- t_comp_16 +WITH RECURSIVE seq AS ( + SELECT 1 AS n + UNION ALL + SELECT n + 1 FROM seq WHERE n < @rows_per_table +) +INSERT INTO t_comp_16 (compressible, semi_random) +SELECT + REPEAT('QQQQQQQQQQ', 1500), -- 15000 chars of repeated Q + CONCAT(MD5(RAND()), '-', MD5(RAND()), '-', MD5(RAND()), '-', MD5(RAND())) +FROM seq; + +-- t_mixed_8 (three distinct patterns) +WITH RECURSIVE seq AS ( + SELECT 1 AS n + UNION ALL + SELECT n + 1 FROM seq WHERE n < @rows_per_table +) +INSERT INTO t_mixed_8 (pattern_a, pattern_b, pattern_c) SELECT - (SELECT student_id FROM students ORDER BY RAND() LIMIT 1), - (SELECT class_id FROM classes ORDER BY RAND() LIMIT 1) -FROM counter -LIMIT 100000; - -DROP TEMPORARY TABLE counter; - --- ======================================== --- HEAVY UPDATES, DELETES, OPTIMIZE FOR CPU/COMPRESSION & PAGE CHANGE --- ======================================== - --- Bulk updates to make lots of compression work -UPDATE students -SET last_name = REPEAT('CPUSurnameOverload', 80) -WHERE student_id % 3 = 0; - -UPDATE students -SET last_name = REPEAT('AnotherSurnamePattern', 80) -WHERE student_id % 5 = 0; - -UPDATE classes -SET teacher = REPEAT('VeryCPUIntensiveTeacher', 40) -WHERE class_id % 2 = 0; - -UPDATE classes -SET teacher = REPEAT('XtremeTeacher', 120) -WHERE class_id % 3 = 0; - --- Bulk delete for page re-org -DELETE FROM enrollments WHERE enrollment_id % 17 = 0; -DELETE FROM enrollments WHERE enrollment_id % 23 = 0; - --- Force flush/defrag pages (can be slow!) 
-OPTIMIZE TABLE students; -OPTIMIZE TABLE classes; -OPTIMIZE TABLE enrollments; - --- (Optional: Table scan SUMs to burn more CPU) -SELECT SUM(CHAR_LENGTH(first_name) + CHAR_LENGTH(last_name)) FROM students; -SELECT SUM(CHAR_LENGTH(name) + CHAR_LENGTH(teacher)) FROM classes; -SELECT COUNT(*) FROM enrollments; - --- ======================================== --- END OF SCRIPT --- ======================================== \ No newline at end of file + REPEAT('M', 8000), + CONCAT(REPEAT('N1', 2000), REPEAT('N2', 2000)), + CONCAT(MD5(RAND()), REPEAT('R', 1000), MD5(RAND())) +FROM seq; + +-- ========================================================= +-- INTERMEDIATE METRICS (after inserts) +-- ========================================================= +SELECT 'AFTER_INSERTS' AS phase, * FROM information_schema.innodb_cmp ORDER BY page_size; + +-- ========================================================= +-- HEAVY UPDATE CYCLES (touch ~30% of rows) +-- ========================================================= +-- Use modulus predicates for approximate fractions + +UPDATE t_comp_2 +SET compressible = CONCAT(REPEAT('UPDATEDA', 3000), REPEAT('UPDATEDB', 3000)) +WHERE id % 10 IN (0,1,2); -- ~30% + +UPDATE t_comp_4 +SET semi_random = CONCAT(MD5(RAND()), MD5(RAND()), REPEAT('UPD', 2000)) +WHERE id % 10 IN (0,1,2); + +UPDATE t_comp_8 +SET compressible = REPEAT('UP8_', 4000) +WHERE id % 5 = 0; -- 20% + +UPDATE t_comp_16 +SET semi_random = CONCAT(REPEAT('CHANGED', 1000), MD5(RAND())) +WHERE id % 4 = 0; -- 25% + +UPDATE t_mixed_8 +SET pattern_b = REPEAT('REWRITEPATTERN', 3000) +WHERE id % 3 = 0; -- ~33% + +-- ========================================================= +-- DELETE FRACTION (approx 10%) to cause page reorganizations +-- ========================================================= +DELETE FROM t_comp_2 WHERE id % 10 = 0; +DELETE FROM t_comp_4 WHERE id % 10 = 0; +DELETE FROM t_comp_8 WHERE id % 10 = 0; +DELETE FROM t_comp_16 WHERE id % 10 = 0; +DELETE FROM t_mixed_8 WHERE id % 10 = 0; + +-- ========================================================= +-- OPTIMIZE TABLE (forces rebuild & compression) - optional & expensive +-- You can comment these out if runtime is too long. 
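+-- For InnoDB, OPTIMIZE TABLE is mapped to ALTER TABLE ... FORCE (a full
+-- table rebuild), so a single table can also be rebuilt and re-measured in
+-- isolation. A minimal sketch, assuming the tables created above:
+--   SELECT page_size, compress_ops FROM information_schema.innodb_cmp;
+--   ALTER TABLE t_comp_8 FORCE;
+--   SELECT page_size, compress_ops FROM information_schema.innodb_cmp;
+-- compress_ops for page_size 8192 should rise between the two snapshots.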
+-- ========================================================= +OPTIMIZE TABLE t_comp_2; +OPTIMIZE TABLE t_comp_4; +OPTIMIZE TABLE t_comp_8; +OPTIMIZE TABLE t_comp_16; +OPTIMIZE TABLE t_mixed_8; + +-- ========================================================= +-- FINAL METRICS SNAPSHOT +-- ========================================================= +SELECT 'FINAL' AS phase, * FROM information_schema.innodb_cmp ORDER BY page_size; + +-- Focused view (selected columns) +SELECT 'FINAL_FOCUSED' AS phase, + page_size, compress_ops, compress_time, uncompress_ops, uncompress_time +FROM information_schema.innodb_cmp +ORDER BY page_size; + +-- ========================================================= +-- OPTIONAL: Show table sizes +-- ========================================================= +SELECT table_name, + engine, + row_format, + DATA_LENGTH/1024/1024 AS data_mb, + INDEX_LENGTH/1024/1024 AS index_mb, + (DATA_LENGTH+INDEX_LENGTH)/1024/1024 AS total_mb +FROM information_schema.tables +WHERE table_schema='innodb_compress_lab' +ORDER BY total_mb DESC; + +-- ========================================================= +-- CLEANUP (uncomment if you want to drop everything at end) +-- ========================================================= +-- DROP DATABASE innodb_compress_lab; \ No newline at end of file From 06f99903935871ed5918219c21aabc27ab4a9bf3 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 19:14:05 +0100 Subject: [PATCH 25/39] PMM-13992: Bigger test data --- pmm_qa/mysql/mysql-setup.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pmm_qa/mysql/mysql-setup.yml b/pmm_qa/mysql/mysql-setup.yml index 11700bf0..c9a51069 100644 --- a/pmm_qa/mysql/mysql-setup.yml +++ b/pmm_qa/mysql/mysql-setup.yml @@ -134,21 +134,21 @@ - name: Prepare data for sysbench inside of all mysql nodes shell: | - docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --mysql-table-engine=InnoDB prepare + docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 prepare when: setup_type != "gr" and setup_type != "replication" loop: "{{ range(1, nodes_count | int + 1) | list }}" - name: Prepare data for sysbench inside of first mysql nodes - shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --mysql-table-engine=InnoDB prepare + shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 prepare when: setup_type == "gr" or setup_type == "replication" - name: Run load for sysbench inside of all mysql nodes - shell: docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --threads=16 --time=60 --mysql-table-engine=InnoDB run + shell: docker exec {{ container_prefix }}{{ item }} sysbench /usr/share/sysbench/oltp_read_write.lua 
--mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --threads=16 --time=60 run loop: "{{ range(1, nodes_count | int + 1) | list }}" when: setup_type != "gr" and setup_type != "replication" - name: Run load for sysbench inside of primary mysql node - shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --threads=16 --time=60 --mysql-table-engine=InnoDB run + shell: docker exec {{ container_prefix }}1 sysbench /usr/share/sysbench/oltp_read_write.lua --mysql-host=127.0.0.1 --mysql-port=3306 --mysql-user=sbtest --mysql-password=password --mysql-db=sbtest --tables=10 --table-size=100000 --threads=16 --time=60 run when: setup_type == "gr" and setup_type == "replication" - name: Copy a load file into the container From 377a6d16ab11641cd210f606c79e8370d1267218 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 19:19:14 +0100 Subject: [PATCH 26/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 106 +++++++++++++++---------------------- 1 file changed, 42 insertions(+), 64 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 8107f3bc..652075ed 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -1,84 +1,65 @@ -- ========================================================= -- InnoDB Compression Stress & Metrics Script (MySQL 8.4+) -- ========================================================= --- Adjust these first if desired -SET @rows_per_table := 100000; -- Base row count (increase for more stress) -SET @update_touch_fraction := 0.30; -- Fraction of rows to update (approx) -SET @delete_fraction := 0.10; -- Fraction of rows to delete (approx) +-- Adjust row count here (start smaller if resource constrained) +SET @rows_per_table := 100000; +SET @cte_depth := @rows_per_table + 10; -- headroom for recursion +SET SESSION cte_max_recursion_depth = @cte_depth; --- Raise recursion depth for large CTE generation -SET SESSION cte_max_recursion_depth = 200000; - --- Drop & recreate schema DROP DATABASE IF EXISTS innodb_compress_lab; CREATE DATABASE innodb_compress_lab; USE innodb_compress_lab; --- Optional: ensure per-table tablespace (generally default ON now) --- SHOW VARIABLES LIKE 'innodb_file_per_table'; +-- Drop any leftover tables (defensive) +DROP TABLE IF EXISTS t_comp_2; +DROP TABLE IF EXISTS t_comp_4; +DROP TABLE IF EXISTS t_comp_8; +DROP TABLE IF EXISTS t_comp_16; +DROP TABLE IF EXISTS t_mixed_8; -- ========================================================= --- Helper CTE for generating N numbers (1..@rows_per_table) --- ========================================================= -WITH RECURSIVE seq AS ( - SELECT 1 AS n - UNION ALL - SELECT n + 1 FROM seq WHERE n < @rows_per_table -) -SELECT COUNT(*) AS generated_rows INTO @generated_rows -FROM seq; --- At this point seq can be re-used inside each INSERT ... SELECT (we'll redefine per table). 
- +-- Create compressed tables (classic InnoDB compression) -- ========================================================= --- TABLE CREATION (different KEY_BLOCK_SIZE values) --- ========================================================= --- Highly compressible: repeated patterns CREATE TABLE t_comp_2 ( - id INT PRIMARY KEY AUTO_INCREMENT, + id INT AUTO_INCREMENT PRIMARY KEY, compressible TEXT, semi_random TEXT ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; CREATE TABLE t_comp_4 ( - id INT PRIMARY KEY AUTO_INCREMENT, + id INT AUTO_INCREMENT PRIMARY KEY, compressible TEXT, semi_random TEXT ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; CREATE TABLE t_comp_8 ( - id INT PRIMARY KEY AUTO_INCREMENT, + id INT AUTO_INCREMENT PRIMARY KEY, compressible TEXT, semi_random TEXT ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; CREATE TABLE t_comp_16 ( - id INT PRIMARY KEY AUTO_INCREMENT, + id INT AUTO_INCREMENT PRIMARY KEY, compressible TEXT, semi_random TEXT ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; --- A mixed workload table (different patterns) reusing KEY_BLOCK_SIZE=8 CREATE TABLE t_mixed_8 ( - id INT PRIMARY KEY AUTO_INCREMENT, + id INT AUTO_INCREMENT PRIMARY KEY, pattern_a TEXT, pattern_b TEXT, pattern_c TEXT ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; -- ========================================================= --- BEFORE METRICS SNAPSHOT +-- Initial metrics snapshot -- ========================================================= -SELECT 'BEFORE' AS phase, * FROM information_schema.innodb_cmp ORDER BY page_size; +SELECT 'BEFORE' AS phase, ic.* FROM information_schema.innodb_cmp ic ORDER BY page_size; -- ========================================================= --- DATA LOAD SECTION --- Each table: insert @rows_per_table rows with varied compressibility +-- Bulk Inserts (declare CTE separately for each table) -- ========================================================= --- Utility function via inline expressions: --- compressible: REPEAT('A', 5000) + REPEAT('B', 5000) etc. 
--- semi_random: CONCAT of pseudo-random fragments using MD5(RAND()) - -- t_comp_2 WITH RECURSIVE seq AS ( SELECT 1 AS n @@ -111,7 +92,7 @@ WITH RECURSIVE seq AS ( ) INSERT INTO t_comp_8 (compressible, semi_random) SELECT - REPEAT('LONGPATTERN1234567890', 600), -- ~12000 chars compressible + REPEAT('LONGPATTERN1234567890', 600), -- ~12k chars CONCAT(MD5(RAND()), MD5(RAND()), MD5(RAND()), MD5(RAND())) FROM seq; @@ -123,11 +104,11 @@ WITH RECURSIVE seq AS ( ) INSERT INTO t_comp_16 (compressible, semi_random) SELECT - REPEAT('QQQQQQQQQQ', 1500), -- 15000 chars of repeated Q + REPEAT('QQQQQQQQQQ', 1500), -- 15k repeated Q CONCAT(MD5(RAND()), '-', MD5(RAND()), '-', MD5(RAND()), '-', MD5(RAND())) FROM seq; --- t_mixed_8 (three distinct patterns) +-- t_mixed_8 WITH RECURSIVE seq AS ( SELECT 1 AS n UNION ALL @@ -141,18 +122,16 @@ SELECT FROM seq; -- ========================================================= --- INTERMEDIATE METRICS (after inserts) +-- Metrics after inserts -- ========================================================= -SELECT 'AFTER_INSERTS' AS phase, * FROM information_schema.innodb_cmp ORDER BY page_size; +SELECT 'AFTER_INSERTS' AS phase, ic.* FROM information_schema.innodb_cmp ic ORDER BY page_size; -- ========================================================= --- HEAVY UPDATE CYCLES (touch ~30% of rows) +-- Heavy updates (approx fractions via modular predicates) -- ========================================================= --- Use modulus predicates for approximate fractions - UPDATE t_comp_2 SET compressible = CONCAT(REPEAT('UPDATEDA', 3000), REPEAT('UPDATEDB', 3000)) -WHERE id % 10 IN (0,1,2); -- ~30% +WHERE id % 10 IN (0,1,2); UPDATE t_comp_4 SET semi_random = CONCAT(MD5(RAND()), MD5(RAND()), REPEAT('UPD', 2000)) @@ -160,18 +139,18 @@ WHERE id % 10 IN (0,1,2); UPDATE t_comp_8 SET compressible = REPEAT('UP8_', 4000) -WHERE id % 5 = 0; -- 20% +WHERE id % 5 = 0; UPDATE t_comp_16 SET semi_random = CONCAT(REPEAT('CHANGED', 1000), MD5(RAND())) -WHERE id % 4 = 0; -- 25% +WHERE id % 4 = 0; UPDATE t_mixed_8 SET pattern_b = REPEAT('REWRITEPATTERN', 3000) -WHERE id % 3 = 0; -- ~33% +WHERE id % 3 = 0; -- ========================================================= --- DELETE FRACTION (approx 10%) to cause page reorganizations +-- Deletes (~10%) to force page reorganization -- ========================================================= DELETE FROM t_comp_2 WHERE id % 10 = 0; DELETE FROM t_comp_4 WHERE id % 10 = 0; @@ -180,8 +159,8 @@ DELETE FROM t_comp_16 WHERE id % 10 = 0; DELETE FROM t_mixed_8 WHERE id % 10 = 0; -- ========================================================= --- OPTIMIZE TABLE (forces rebuild & compression) - optional & expensive --- You can comment these out if runtime is too long. 
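+-- A possible extra check, assuming innodb_cmp_per_index_enabled is switched
+-- on first (it defaults to OFF because of its bookkeeping overhead), is the
+-- per-index counter view, which shows which table causes the most
+-- compression work:
+--   SET GLOBAL innodb_cmp_per_index_enabled = ON;
+--   SELECT table_name, compress_ops, compress_ops_ok
+--   FROM information_schema.innodb_cmp_per_index;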
+-- Optional: OPTIMIZE (expensive; triggers further compression) +-- Comment these out if runtime is excessive -- ========================================================= OPTIMIZE TABLE t_comp_2; OPTIMIZE TABLE t_comp_4; @@ -190,19 +169,20 @@ OPTIMIZE TABLE t_comp_16; OPTIMIZE TABLE t_mixed_8; -- ========================================================= --- FINAL METRICS SNAPSHOT +-- Final metrics snapshots -- ========================================================= -SELECT 'FINAL' AS phase, * FROM information_schema.innodb_cmp ORDER BY page_size; +SELECT 'FINAL' AS phase, ic.* FROM information_schema.innodb_cmp ic ORDER BY page_size; --- Focused view (selected columns) SELECT 'FINAL_FOCUSED' AS phase, - page_size, compress_ops, compress_time, uncompress_ops, uncompress_time -FROM information_schema.innodb_cmp + ic.page_size, + ic.compress_ops, + ic.compress_time, + ic.uncompress_ops, + ic.uncompress_time +FROM information_schema.innodb_cmp ic ORDER BY page_size; --- ========================================================= --- OPTIONAL: Show table sizes --- ========================================================= +-- Table size overview SELECT table_name, engine, row_format, @@ -213,7 +193,5 @@ FROM information_schema.tables WHERE table_schema='innodb_compress_lab' ORDER BY total_mb DESC; --- ========================================================= --- CLEANUP (uncomment if you want to drop everything at end) --- ========================================================= +-- Cleanup option (leave commented if you want to inspect) -- DROP DATABASE innodb_compress_lab; \ No newline at end of file From e04b865004218977e432dfe23c003f8cb6269b0f Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 19:26:05 +0100 Subject: [PATCH 27/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 283 ++++++++++++------------------------- 1 file changed, 90 insertions(+), 193 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 652075ed..21334e24 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -1,197 +1,94 @@ --- ========================================================= --- InnoDB Compression Stress & Metrics Script (MySQL 8.4+) --- ========================================================= --- Adjust row count here (start smaller if resource constrained) -SET @rows_per_table := 100000; -SET @cte_depth := @rows_per_table + 10; -- headroom for recursion -SET SESSION cte_max_recursion_depth = @cte_depth; - -DROP DATABASE IF EXISTS innodb_compress_lab; -CREATE DATABASE innodb_compress_lab; -USE innodb_compress_lab; - --- Drop any leftover tables (defensive) -DROP TABLE IF EXISTS t_comp_2; -DROP TABLE IF EXISTS t_comp_4; -DROP TABLE IF EXISTS t_comp_8; -DROP TABLE IF EXISTS t_comp_16; -DROP TABLE IF EXISTS t_mixed_8; - --- ========================================================= --- Create compressed tables (classic InnoDB compression) --- ========================================================= -CREATE TABLE t_comp_2 ( - id INT AUTO_INCREMENT PRIMARY KEY, - compressible TEXT, - semi_random TEXT -) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; - -CREATE TABLE t_comp_4 ( - id INT AUTO_INCREMENT PRIMARY KEY, - compressible TEXT, - semi_random TEXT -) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; - -CREATE TABLE t_comp_8 ( - id INT AUTO_INCREMENT PRIMARY KEY, - compressible TEXT, - semi_random TEXT +-- ======================================== +-- CREATE TABLES +-- ======================================== + 
+CREATE TABLE students ( + student_id INT AUTO_INCREMENT PRIMARY KEY, + first_name VARCHAR(50), + last_name VARCHAR(50), + birth_date DATE ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; -CREATE TABLE t_comp_16 ( - id INT AUTO_INCREMENT PRIMARY KEY, - compressible TEXT, - semi_random TEXT -) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; - -CREATE TABLE t_mixed_8 ( - id INT AUTO_INCREMENT PRIMARY KEY, - pattern_a TEXT, - pattern_b TEXT, - pattern_c TEXT +CREATE TABLE classes ( + class_id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(100), + teacher VARCHAR(100) ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; --- ========================================================= --- Initial metrics snapshot --- ========================================================= -SELECT 'BEFORE' AS phase, ic.* FROM information_schema.innodb_cmp ic ORDER BY page_size; - --- ========================================================= --- Bulk Inserts (declare CTE separately for each table) --- ========================================================= - --- t_comp_2 -WITH RECURSIVE seq AS ( - SELECT 1 AS n - UNION ALL - SELECT n + 1 FROM seq WHERE n < @rows_per_table -) -INSERT INTO t_comp_2 (compressible, semi_random) -SELECT - CONCAT(REPEAT('A', 4000), REPEAT('B', 4000), REPEAT('C', 2000)), - CONCAT(MD5(RAND()), MD5(RAND()), MD5(RAND())) -FROM seq; - --- t_comp_4 -WITH RECURSIVE seq AS ( - SELECT 1 AS n - UNION ALL - SELECT n + 1 FROM seq WHERE n < @rows_per_table -) -INSERT INTO t_comp_4 (compressible, semi_random) -SELECT - CONCAT(REPEAT('X', 3000), REPEAT('Y', 3000), REPEAT('Z', 4000)), - CONCAT(MD5(RAND()), ':', MD5(RAND()), ':', MD5(RAND())) -FROM seq; - --- t_comp_8 -WITH RECURSIVE seq AS ( - SELECT 1 AS n - UNION ALL - SELECT n + 1 FROM seq WHERE n < @rows_per_table -) -INSERT INTO t_comp_8 (compressible, semi_random) -SELECT - REPEAT('LONGPATTERN1234567890', 600), -- ~12k chars - CONCAT(MD5(RAND()), MD5(RAND()), MD5(RAND()), MD5(RAND())) -FROM seq; - --- t_comp_16 -WITH RECURSIVE seq AS ( - SELECT 1 AS n - UNION ALL - SELECT n + 1 FROM seq WHERE n < @rows_per_table -) -INSERT INTO t_comp_16 (compressible, semi_random) -SELECT - REPEAT('QQQQQQQQQQ', 1500), -- 15k repeated Q - CONCAT(MD5(RAND()), '-', MD5(RAND()), '-', MD5(RAND()), '-', MD5(RAND())) -FROM seq; - --- t_mixed_8 -WITH RECURSIVE seq AS ( - SELECT 1 AS n - UNION ALL - SELECT n + 1 FROM seq WHERE n < @rows_per_table -) -INSERT INTO t_mixed_8 (pattern_a, pattern_b, pattern_c) -SELECT - REPEAT('M', 8000), - CONCAT(REPEAT('N1', 2000), REPEAT('N2', 2000)), - CONCAT(MD5(RAND()), REPEAT('R', 1000), MD5(RAND())) -FROM seq; - --- ========================================================= --- Metrics after inserts --- ========================================================= -SELECT 'AFTER_INSERTS' AS phase, ic.* FROM information_schema.innodb_cmp ic ORDER BY page_size; - --- ========================================================= --- Heavy updates (approx fractions via modular predicates) --- ========================================================= -UPDATE t_comp_2 -SET compressible = CONCAT(REPEAT('UPDATEDA', 3000), REPEAT('UPDATEDB', 3000)) -WHERE id % 10 IN (0,1,2); - -UPDATE t_comp_4 -SET semi_random = CONCAT(MD5(RAND()), MD5(RAND()), REPEAT('UPD', 2000)) -WHERE id % 10 IN (0,1,2); - -UPDATE t_comp_8 -SET compressible = REPEAT('UP8_', 4000) -WHERE id % 5 = 0; - -UPDATE t_comp_16 -SET semi_random = CONCAT(REPEAT('CHANGED', 1000), MD5(RAND())) -WHERE id % 4 = 0; - -UPDATE t_mixed_8 -SET pattern_b = REPEAT('REWRITEPATTERN', 
3000) -WHERE id % 3 = 0; - --- ========================================================= --- Deletes (~10%) to force page reorganization --- ========================================================= -DELETE FROM t_comp_2 WHERE id % 10 = 0; -DELETE FROM t_comp_4 WHERE id % 10 = 0; -DELETE FROM t_comp_8 WHERE id % 10 = 0; -DELETE FROM t_comp_16 WHERE id % 10 = 0; -DELETE FROM t_mixed_8 WHERE id % 10 = 0; - --- ========================================================= --- Optional: OPTIMIZE (expensive; triggers further compression) --- Comment these out if runtime is excessive --- ========================================================= -OPTIMIZE TABLE t_comp_2; -OPTIMIZE TABLE t_comp_4; -OPTIMIZE TABLE t_comp_8; -OPTIMIZE TABLE t_comp_16; -OPTIMIZE TABLE t_mixed_8; - --- ========================================================= --- Final metrics snapshots --- ========================================================= -SELECT 'FINAL' AS phase, ic.* FROM information_schema.innodb_cmp ic ORDER BY page_size; - -SELECT 'FINAL_FOCUSED' AS phase, - ic.page_size, - ic.compress_ops, - ic.compress_time, - ic.uncompress_ops, - ic.uncompress_time -FROM information_schema.innodb_cmp ic -ORDER BY page_size; - --- Table size overview -SELECT table_name, - engine, - row_format, - DATA_LENGTH/1024/1024 AS data_mb, - INDEX_LENGTH/1024/1024 AS index_mb, - (DATA_LENGTH+INDEX_LENGTH)/1024/1024 AS total_mb -FROM information_schema.tables -WHERE table_schema='innodb_compress_lab' -ORDER BY total_mb DESC; - --- Cleanup option (leave commented if you want to inspect) --- DROP DATABASE innodb_compress_lab; \ No newline at end of file +CREATE TABLE enrollments ( + enrollment_id INT AUTO_INCREMENT PRIMARY KEY, + student_id INT, + class_id INT, + enrollment_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (student_id) REFERENCES students(student_id), + FOREIGN KEY (class_id) REFERENCES classes(class_id) +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; + +-- ======================================== +-- INSERT INITIAL DATA +-- ======================================== + +INSERT INTO students (first_name, last_name, birth_date) VALUES +('Alice', 'Smith', '2005-04-10'), +('Bob', 'Johnson', '2006-08-15'), +('Charlie', 'Brown', '2004-12-01'); + +INSERT INTO classes (name, teacher) VALUES +('Mathematics', 'Mrs. Taylor'), +('History', 'Mr. Anderson'), +('Science', 'Dr. Reynolds'); + +INSERT INTO enrollments (student_id, class_id) VALUES +(1, 1), +(1, 2), +(2, 2), +(3, 1), +(3, 3); + +-- ======================================== +-- SELECT: View all data after insert +-- ======================================== + +-- View all students +SELECT * FROM students; + +-- View all classes +SELECT * FROM classes; + +-- View all enrollments +SELECT * FROM enrollments; + +-- View students enrolled in Mathematics +SELECT s.first_name, s.last_name +FROM students s +JOIN enrollments e ON s.student_id = e.student_id +JOIN classes c ON e.class_id = c.class_id +WHERE c.name = 'Mathematics'; + +-- Count students per class +SELECT c.name AS class_name, COUNT(e.student_id) AS student_count +FROM classes c +LEFT JOIN enrollments e ON c.class_id = e.class_id +GROUP BY c.name; + +-- ======================================== +-- UPDATE DATA +-- ======================================== + +UPDATE students +SET last_name = 'Williams' +WHERE first_name = 'Bob' AND last_name = 'Johnson'; + +UPDATE classes +SET teacher = 'Ms. 
Carter' +WHERE name = 'History'; + +-- ======================================== +-- DELETE DATA +-- ======================================== + +DELETE FROM enrollments +WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'); + +DELETE FROM students +WHERE first_name = 'Alice' AND last_name = 'Smith'; \ No newline at end of file From 97e7c6c4a8a3c4340e3d1a3546db00b1e1f43d8b Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 19:42:02 +0100 Subject: [PATCH 28/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 90 +++++++++++++++++++++++++++++++++++++- 1 file changed, 89 insertions(+), 1 deletion(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 21334e24..5c5dc718 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -91,4 +91,92 @@ DELETE FROM enrollments WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'); DELETE FROM students -WHERE first_name = 'Alice' AND last_name = 'Smith'; \ No newline at end of file +WHERE first_name = 'Alice' AND last_name = 'Smith'; + +-- ======================================== +-- COMPRESSION METRIC BOOSTER (MINIMAL) +-- ======================================== + +-- 1. Confirm row format (should say "Compressed") +SHOW TABLE STATUS LIKE 'students'\G +SHOW TABLE STATUS LIKE 'classes'\G +SHOW TABLE STATUS LIKE 'enrollments'\G + +-- 2. Add a compressible TEXT column (if not already present) +ALTER TABLE students ADD COLUMN notes TEXT NULL; + +-- 3. Bulk-fill extra rows with highly compressible data +-- Adjust @rows if you want more. +SET @rows := 20000; + +-- Build a numbers helper inline (100 x 200 = 20,000 rows) +INSERT INTO students (first_name, last_name, birth_date, notes) +SELECT CONCAT('Extra', n.seq) AS first_name, + CONCAT('User', n.seq) AS last_name, + DATE('2005-01-01') + INTERVAL (n.seq % 365) DAY AS birth_date, + RPAD('LoremIpsum ', 800, 'LoremIpsum ') AS notes +FROM ( + SELECT (a.i*200) + b.i AS seq + FROM (SELECT 0 i UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) a + CROSS JOIN ( + SELECT 0 i UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 + UNION ALL SELECT 10 UNION ALL SELECT 11 UNION ALL SELECT 12 UNION ALL SELECT 13 UNION ALL SELECT 14 + UNION ALL SELECT 15 UNION ALL SELECT 16 UNION ALL SELECT 17 UNION ALL SELECT 18 UNION ALL SELECT 19 + UNION ALL SELECT 20 UNION ALL SELECT 21 UNION ALL SELECT 22 UNION ALL SELECT 23 UNION ALL SELECT 24 + UNION ALL SELECT 25 UNION ALL SELECT 26 UNION ALL SELECT 27 UNION ALL SELECT 28 UNION ALL SELECT 29 + UNION ALL SELECT 30 UNION ALL SELECT 31 UNION ALL SELECT 32 UNION ALL SELECT 33 UNION ALL SELECT 34 + UNION ALL SELECT 35 UNION ALL SELECT 36 UNION ALL SELECT 37 UNION ALL SELECT 38 UNION ALL SELECT 39 + UNION ALL SELECT 40 UNION ALL SELECT 41 UNION ALL SELECT 42 UNION ALL SELECT 43 UNION ALL SELECT 44 + UNION ALL SELECT 45 UNION ALL SELECT 46 UNION ALL SELECT 47 UNION ALL SELECT 48 UNION ALL SELECT 49 + UNION ALL SELECT 50 UNION ALL SELECT 51 UNION ALL SELECT 52 UNION ALL SELECT 53 UNION ALL SELECT 54 + UNION ALL SELECT 55 UNION ALL SELECT 56 UNION ALL SELECT 57 UNION ALL SELECT 58 UNION ALL SELECT 59 + UNION ALL SELECT 60 UNION ALL SELECT 61 UNION ALL SELECT 62 UNION ALL SELECT 63 UNION 
ALL SELECT 64 + UNION ALL SELECT 65 UNION ALL SELECT 66 UNION ALL SELECT 67 UNION ALL SELECT 68 UNION ALL SELECT 69 + UNION ALL SELECT 70 UNION ALL SELECT 71 UNION ALL SELECT 72 UNION ALL SELECT 73 UNION ALL SELECT 74 + UNION ALL SELECT 75 UNION ALL SELECT 76 UNION ALL SELECT 77 UNION ALL SELECT 78 UNION ALL SELECT 79 + UNION ALL SELECT 80 UNION ALL SELECT 81 UNION ALL SELECT 82 UNION ALL SELECT 83 UNION ALL SELECT 84 + UNION ALL SELECT 85 UNION ALL SELECT 86 UNION ALL SELECT 87 UNION ALL SELECT 88 UNION ALL SELECT 89 + UNION ALL SELECT 90 UNION ALL SELECT 91 UNION ALL SELECT 92 UNION ALL SELECT 93 UNION ALL SELECT 94 + UNION ALL SELECT 95 UNION ALL SELECT 96 UNION ALL SELECT 97 UNION ALL SELECT 98 UNION ALL SELECT 99 + ) b + WHERE (a.i*200)+b.i < @rows +) n; + +-- 4. View current compression counters +SELECT PAGE_SIZE, COMPRESS_OPS, COMPRESS_TIME, UNCOMPRESS_OPS, UNCOMPRESS_TIME +FROM information_schema.INNODB_CMP; + +-- 5. Flush table metadata (does NOT guarantee eviction, but harmless) +FLUSH TABLE students; + +-- 6. Workload generator: random range reads to stimulate uncompress +DELIMITER $$ +CREATE PROCEDURE run_students_reads(IN loops INT) +BEGIN + DECLARE i INT DEFAULT 0; + DECLARE start_id INT; + DECLARE dummy BIGINT; + WHILE i < loops DO + SET start_id = FLOOR(RAND()*@rows) + 1; + -- Use SQL_NO_CACHE to skip the query cache (if enabled) + SELECT /*+ SQL_NO_CACHE */ COUNT(*) INTO dummy + FROM students + WHERE student_id BETWEEN start_id AND start_id + 50; + SET i = i + 1; + END WHILE; +END$$ +DELIMITER ; + +CALL run_students_reads(5000); + +-- 7. Check counters again +SELECT PAGE_SIZE, COMPRESS_OPS, COMPRESS_TIME, UNCOMPRESS_OPS, UNCOMPRESS_TIME +FROM information_schema.INNODB_CMP; + +-- If still zero, optionally rebuild (forces new compressed pages) then re-run reads: + ALTER TABLE students FORCE; + CALL run_students_reads(8000); + SELECT UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; + From fb2d108c2b27cdb0a3674184b9676f1a6cef8305 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Mon, 24 Nov 2025 19:56:06 +0100 Subject: [PATCH 29/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 165 ++++++++++++++++++++++++------------- 1 file changed, 106 insertions(+), 59 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 5c5dc718..7658f2a2 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -94,89 +94,136 @@ DELETE FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'; -- ======================================== --- COMPRESSION METRIC BOOSTER (MINIMAL) +-- AGGRESSIVE COMPRESSION METRIC CHURN -- ======================================== --- 1. Confirm row format (should say "Compressed") -SHOW TABLE STATUS LIKE 'students'\G -SHOW TABLE STATUS LIKE 'classes'\G -SHOW TABLE STATUS LIKE 'enrollments'\G - --- 2. Add a compressible TEXT column (if not already present) -ALTER TABLE students ADD COLUMN notes TEXT NULL; +-- Inspect buffer pool size (bytes) +SHOW VARIABLES LIKE 'innodb_buffer_pool_size'; + +-- 1. Create a LARGE compressed table (bigger than buffer pool) +DROP TABLE IF EXISTS big_students; +CREATE TABLE big_students ( + id INT AUTO_INCREMENT PRIMARY KEY, + pad1 VARCHAR(100), + pad2 VARCHAR(100), + notes TEXT, + filler TEXT +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; --- 3. Bulk-fill extra rows with highly compressible data --- Adjust @rows if you want more. -SET @rows := 20000; +-- 2. 
Create a LARGE uncompressed (eviction) table +DROP TABLE IF EXISTS evict_buffer; +CREATE TABLE evict_buffer ( + id INT AUTO_INCREMENT PRIMARY KEY, + junk VARCHAR(100), + blobdata TEXT +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; + +-- 3. Bulk insert rows into big_students (compressible data) +-- Adjust @rows_big upward (e.g. 2_000_000) if buffer pool is large. +SET @rows_big := 800000; + +-- Insert in chunks using a numbers generator (10 x 10 x 10 x 8,000 expansion) +-- We construct ~@rows_big rows of highly compressible text. +INSERT INTO big_students (pad1, pad2, notes, filler) +SELECT + CONCAT('P1_', n.seq), + CONCAT('P2_', n.seq), + RPAD('COMPRESSIBLE_', 1200, 'COMPRESSIBLE_'), + RPAD('FILL', 800, 'FILL') +FROM ( + SELECT (@row := @row + 1) AS seq + FROM + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t2, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t3, + (SELECT @row := 0) init + LIMIT @rows_big +) n; --- Build a numbers helper inline (100 x 200 = 20,000 rows) -INSERT INTO students (first_name, last_name, birth_date, notes) -SELECT CONCAT('Extra', n.seq) AS first_name, - CONCAT('User', n.seq) AS last_name, - DATE('2005-01-01') + INTERVAL (n.seq % 365) DAY AS birth_date, - RPAD('LoremIpsum ', 800, 'LoremIpsum ') AS notes +-- 4. Bulk insert rows into evict_buffer (uncompressed & less compressible) +SET @rows_evict := 600000; +INSERT INTO evict_buffer (junk, blobdata) +SELECT + CONCAT('J', n.seq), + -- Less compressible pseudo-random-ish data (vary characters) + CONCAT( + MD5(RAND()), '_', MD5(RAND()), '_', + RPAD(MD5(RAND()), 300, 'Z') + ) FROM ( - SELECT (a.i*200) + b.i AS seq - FROM (SELECT 0 i UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) a - CROSS JOIN ( - SELECT 0 i UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 - UNION ALL SELECT 10 UNION ALL SELECT 11 UNION ALL SELECT 12 UNION ALL SELECT 13 UNION ALL SELECT 14 - UNION ALL SELECT 15 UNION ALL SELECT 16 UNION ALL SELECT 17 UNION ALL SELECT 18 UNION ALL SELECT 19 - UNION ALL SELECT 20 UNION ALL SELECT 21 UNION ALL SELECT 22 UNION ALL SELECT 23 UNION ALL SELECT 24 - UNION ALL SELECT 25 UNION ALL SELECT 26 UNION ALL SELECT 27 UNION ALL SELECT 28 UNION ALL SELECT 29 - UNION ALL SELECT 30 UNION ALL SELECT 31 UNION ALL SELECT 32 UNION ALL SELECT 33 UNION ALL SELECT 34 - UNION ALL SELECT 35 UNION ALL SELECT 36 UNION ALL SELECT 37 UNION ALL SELECT 38 UNION ALL SELECT 39 - UNION ALL SELECT 40 UNION ALL SELECT 41 UNION ALL SELECT 42 UNION ALL SELECT 43 UNION ALL SELECT 44 - UNION ALL SELECT 45 UNION ALL SELECT 46 UNION ALL SELECT 47 UNION ALL SELECT 48 UNION ALL SELECT 49 - UNION ALL SELECT 50 UNION ALL SELECT 51 UNION ALL SELECT 52 UNION ALL SELECT 53 UNION ALL SELECT 54 - UNION ALL SELECT 55 UNION ALL SELECT 56 UNION ALL SELECT 57 UNION ALL SELECT 58 UNION ALL SELECT 59 - UNION ALL SELECT 60 UNION ALL SELECT 61 UNION ALL SELECT 62 UNION 
ALL SELECT 63 UNION ALL SELECT 64 - UNION ALL SELECT 65 UNION ALL SELECT 66 UNION ALL SELECT 67 UNION ALL SELECT 68 UNION ALL SELECT 69 - UNION ALL SELECT 70 UNION ALL SELECT 71 UNION ALL SELECT 72 UNION ALL SELECT 73 UNION ALL SELECT 74 - UNION ALL SELECT 75 UNION ALL SELECT 76 UNION ALL SELECT 77 UNION ALL SELECT 78 UNION ALL SELECT 79 - UNION ALL SELECT 80 UNION ALL SELECT 81 UNION ALL SELECT 82 UNION ALL SELECT 83 UNION ALL SELECT 84 - UNION ALL SELECT 85 UNION ALL SELECT 86 UNION ALL SELECT 87 UNION ALL SELECT 88 UNION ALL SELECT 89 - UNION ALL SELECT 90 UNION ALL SELECT 91 UNION ALL SELECT 92 UNION ALL SELECT 93 UNION ALL SELECT 94 - UNION ALL SELECT 95 UNION ALL SELECT 96 UNION ALL SELECT 97 UNION ALL SELECT 98 UNION ALL SELECT 99 - ) b - WHERE (a.i*200)+b.i < @rows + SELECT (@row2 := @row2 + 1) AS seq + FROM + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) a, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) b, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) c, + (SELECT @row2 := 0) init + LIMIT @rows_evict ) n; --- 4. View current compression counters +-- 5. Check row formats & sizes +SHOW TABLE STATUS LIKE 'big_students'\G +SHOW TABLE STATUS LIKE 'evict_buffer'\G + +-- 6. Initial compression counters SELECT PAGE_SIZE, COMPRESS_OPS, COMPRESS_TIME, UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; --- 5. Flush table metadata (does NOT guarantee eviction, but harmless) -FLUSH TABLE students; - --- 6. Workload generator: random range reads to stimulate uncompress +-- 7. Workload procedures (interleaved access) DELIMITER $$ -CREATE PROCEDURE run_students_reads(IN loops INT) +CREATE PROCEDURE churn(IN loops INT) BEGIN DECLARE i INT DEFAULT 0; - DECLARE start_id INT; + DECLARE rstart INT; DECLARE dummy BIGINT; WHILE i < loops DO - SET start_id = FLOOR(RAND()*@rows) + 1; - -- Use SQL_NO_CACHE to skip the query cache (if enabled) + -- Random range read on compressed table (SQL_NO_CACHE) + SET rstart = FLOOR(RAND() * @rows_big) + 1; + SELECT /*+ SQL_NO_CACHE */ SUM(id) INTO dummy + FROM big_students + WHERE id BETWEEN rstart AND rstart + 150; + + -- Scan slice of eviction table to evict pages + SET rstart = FLOOR(RAND() * @rows_evict) + 1; SELECT /*+ SQL_NO_CACHE */ COUNT(*) INTO dummy - FROM students - WHERE student_id BETWEEN start_id AND start_id + 50; + FROM evict_buffer + WHERE id BETWEEN rstart AND rstart + 2000; + + -- Occasional full-ish scan segment to push more evictions + IF (i % 50 = 0) THEN + SELECT /*+ SQL_NO_CACHE */ AVG(id) INTO dummy + FROM evict_buffer + WHERE id BETWEEN rstart AND rstart + 25000; + END IF; + + -- Update chunk on compressed table (forces page writes & reads) + IF (i % 20 = 0) THEN + UPDATE big_students + SET filler = CONCAT(filler, 'X') + WHERE id BETWEEN rstart AND rstart + 120; + END IF; + SET i = i + 1; END WHILE; END$$ DELIMITER ; -CALL run_students_reads(5000); +-- 8. Run churn more than once (increase loops if needed) +CALL churn(2000); +CALL churn(2000); +CALL churn(3000); --- 7. Check counters again +-- 9. 
Re-check counters SELECT PAGE_SIZE, COMPRESS_OPS, COMPRESS_TIME, UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; --- If still zero, optionally rebuild (forces new compressed pages) then re-run reads: - ALTER TABLE students FORCE; - CALL run_students_reads(8000); +-- 10. Optional: FORCE table rebuild (creates new compressed pages) then churn again + ALTER TABLE big_students FORCE; + CALL churn(3000); SELECT UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; From 5612bbb38f547ab39d639723040d3fc1d4ca3489 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Tue, 25 Nov 2025 07:44:23 +0100 Subject: [PATCH 30/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 2 -- 1 file changed, 2 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 7658f2a2..ef0e0479 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -119,11 +119,9 @@ CREATE TABLE evict_buffer ( ) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; -- 3. Bulk insert rows into big_students (compressible data) --- Adjust @rows_big upward (e.g. 2_000_000) if buffer pool is large. SET @rows_big := 800000; -- Insert in chunks using a numbers generator (10 x 10 x 10 x 8,000 expansion) --- We construct ~@rows_big rows of highly compressible text. INSERT INTO big_students (pad1, pad2, notes, filler) SELECT CONCAT('P1_', n.seq), From e3e972421f684519257a876c161fad8f9cdeb175 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Tue, 25 Nov 2025 07:52:25 +0100 Subject: [PATCH 31/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 187 ++++++++++++++++++++++--------------- 1 file changed, 113 insertions(+), 74 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index ef0e0479..de707949 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -94,13 +94,12 @@ DELETE FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'; -- ======================================== --- AGGRESSIVE COMPRESSION METRIC CHURN +-- AGGRESSIVE COMPRESSION METRIC CHURN (Chunked) -- ======================================== --- Inspect buffer pool size (bytes) +-- Inspect buffer pool (bytes) SHOW VARIABLES LIKE 'innodb_buffer_pool_size'; --- 1. Create a LARGE compressed table (bigger than buffer pool) DROP TABLE IF EXISTS big_students; CREATE TABLE big_students ( id INT AUTO_INCREMENT PRIMARY KEY, @@ -110,7 +109,6 @@ CREATE TABLE big_students ( filler TEXT ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; --- 2. Create a LARGE uncompressed (eviction) table DROP TABLE IF EXISTS evict_buffer; CREATE TABLE evict_buffer ( id INT AUTO_INCREMENT PRIMARY KEY, @@ -118,61 +116,107 @@ CREATE TABLE evict_buffer ( blobdata TEXT ) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; --- 3. 
Bulk insert rows into big_students (compressible data) -SET @rows_big := 800000; - --- Insert in chunks using a numbers generator (10 x 10 x 10 x 8,000 expansion) -INSERT INTO big_students (pad1, pad2, notes, filler) -SELECT - CONCAT('P1_', n.seq), - CONCAT('P2_', n.seq), - RPAD('COMPRESSIBLE_', 1200, 'COMPRESSIBLE_'), - RPAD('FILL', 800, 'FILL') -FROM ( - SELECT (@row := @row + 1) AS seq - FROM - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t2, - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t3, - (SELECT @row := 0) init - LIMIT @rows_big -) n; - --- 4. Bulk insert rows into evict_buffer (uncompressed & less compressible) -SET @rows_evict := 600000; -INSERT INTO evict_buffer (junk, blobdata) -SELECT - CONCAT('J', n.seq), - -- Less compressible pseudo-random-ish data (vary characters) - CONCAT( - MD5(RAND()), '_', MD5(RAND()), '_', - RPAD(MD5(RAND()), 300, 'Z') - ) -FROM ( - SELECT (@row2 := @row2 + 1) AS seq - FROM - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) a, - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) b, - (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) c, - (SELECT @row2 := 0) init - LIMIT @rows_evict -) n; - --- 5. 
Check row formats & sizes +-- How many total rows you want (adjust upward if buffer pool is large) +SET @rows_big := 800000; -- compressed table target +SET @rows_evict := 600000; -- eviction table target + +-- We will insert in CHUNKS of 50,000 rows to avoid huge single INSERT +SET @chunk_size := 50000; + +-- Helper: make small number tables (0..99) and (0..499) for expansion +DROP TEMPORARY TABLE IF EXISTS nums100; +CREATE TEMPORARY TABLE nums100 (n INT PRIMARY KEY); +INSERT INTO nums100(n) +SELECT seq FROM ( + SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 +) d1, +( + SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 +) d2; -- 10 x 10 = 100 + +DROP TEMPORARY TABLE IF EXISTS nums500; +CREATE TEMPORARY TABLE nums500 (n INT PRIMARY KEY); +INSERT INTO nums500(n) +SELECT seq FROM ( + SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 +) d1, +( + SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 +) d2, +( + SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 +) d3, +( + SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 +) d4, +( + SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 +) d5; -- 10^5 = 100000 (we will LIMIT to 500 per 100 => 50000 rows per chunk) + +-- Procedure to load big_students in chunks +DELIMITER $$ +CREATE PROCEDURE fill_big_students(IN total INT, IN chunk INT) +BEGIN + DECLARE inserted INT DEFAULT 0; + WHILE inserted < total DO + INSERT INTO big_students (pad1, pad2, notes, filler) + SELECT + CONCAT('P1_', seq + inserted) AS pad1, + CONCAT('P2_', seq + inserted) AS pad2, + RPAD('COMPRESSIBLE_', 1200, 'COMPRESSIBLE_') AS notes, + RPAD('FILL', 800, 'FILL') AS filler + FROM ( + SELECT (a.n * 500) + b.n AS seq + FROM nums100 a + JOIN nums500 b ON b.n < 500 -- ensures 100 * 500 = 50,000 rows + ) gen + LIMIT chunk; + SET inserted = inserted + chunk; + END WHILE; +END$$ +DELIMITER ; + +-- Procedure to load evict_buffer in chunks (less compressible data) +DELIMITER $$ +CREATE PROCEDURE fill_evict_buffer(IN total INT, IN chunk INT) +BEGIN + DECLARE inserted INT DEFAULT 0; + WHILE inserted < total DO + INSERT INTO evict_buffer (junk, blobdata) + SELECT + CONCAT('J', seq + inserted), + CONCAT(MD5(RAND()), '_', MD5(RAND()), '_', RPAD(MD5(RAND()), 300, 'Z')) + FROM ( + SELECT (a.n * 500) + b.n AS seq + FROM nums100 a + JOIN nums500 b ON b.n < 500 + ) gen + LIMIT chunk; + SET inserted = inserted + chunk; + END WHILE; +END$$ +DELIMITER ; + +-- Execute loaders +CALL fill_big_students(@rows_big, @chunk_size); +CALL fill_evict_buffer(@rows_evict, @chunk_size); + +-- Verify formats SHOW TABLE STATUS LIKE 'big_students'\G SHOW TABLE 
STATUS LIKE 'evict_buffer'\G --- 6. Initial compression counters +-- Initial compression counters SELECT PAGE_SIZE, COMPRESS_OPS, COMPRESS_TIME, UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; --- 7. Workload procedures (interleaved access) +-- Churn procedure (interleaved access) DELIMITER $$ CREATE PROCEDURE churn(IN loops INT) BEGIN @@ -180,30 +224,26 @@ BEGIN DECLARE rstart INT; DECLARE dummy BIGINT; WHILE i < loops DO - -- Random range read on compressed table (SQL_NO_CACHE) SET rstart = FLOOR(RAND() * @rows_big) + 1; - SELECT /*+ SQL_NO_CACHE */ SUM(id) INTO dummy + SELECT SQL_NO_CACHE SUM(id) INTO dummy FROM big_students WHERE id BETWEEN rstart AND rstart + 150; - -- Scan slice of eviction table to evict pages SET rstart = FLOOR(RAND() * @rows_evict) + 1; - SELECT /*+ SQL_NO_CACHE */ COUNT(*) INTO dummy + SELECT SQL_NO_CACHE COUNT(*) INTO dummy FROM evict_buffer - WHERE id BETWEEN rstart AND rstart + 2000; + WHERE id BETWEEN rstart AND rstart + 3000; - -- Occasional full-ish scan segment to push more evictions - IF (i % 50 = 0) THEN - SELECT /*+ SQL_NO_CACHE */ AVG(id) INTO dummy + IF (i % 40 = 0) THEN + SELECT SQL_NO_CACHE AVG(id) INTO dummy FROM evict_buffer - WHERE id BETWEEN rstart AND rstart + 25000; + WHERE id BETWEEN rstart AND rstart + 30000; END IF; - -- Update chunk on compressed table (forces page writes & reads) - IF (i % 20 = 0) THEN + IF (i % 25 = 0) THEN UPDATE big_students SET filler = CONCAT(filler, 'X') - WHERE id BETWEEN rstart AND rstart + 120; + WHERE id BETWEEN rstart AND rstart + 180; END IF; SET i = i + 1; @@ -211,17 +251,16 @@ BEGIN END$$ DELIMITER ; --- 8. Run churn more than once (increase loops if needed) -CALL churn(2000); -CALL churn(2000); +-- Run multiple passes (increase loops if needed) +CALL churn(3000); CALL churn(3000); +CALL churn(4000); --- 9. Re-check counters +-- Check counters again SELECT PAGE_SIZE, COMPRESS_OPS, COMPRESS_TIME, UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; --- 10. 
Optional: FORCE table rebuild (creates new compressed pages) then churn again - ALTER TABLE big_students FORCE; - CALL churn(3000); - SELECT UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; - +-- Optional: FORCE rebuild to create fresh compressed pages and churn again +-- ALTER TABLE big_students FORCE; +-- CALL churn(4000); +-- SELECT UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; From d6f4e6e82114c8f1a0aa16e14a1774b7dbf10058 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Tue, 25 Nov 2025 07:58:25 +0100 Subject: [PATCH 32/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 98 ++++++++++++++++---------------------- 1 file changed, 41 insertions(+), 57 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index de707949..a8ac3839 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -94,10 +94,10 @@ DELETE FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'; -- ======================================== --- AGGRESSIVE COMPRESSION METRIC CHURN (Chunked) +-- AGGRESSIVE COMPRESSION METRIC CHURN (Chunked, Non-CTE) -- ======================================== --- Inspect buffer pool (bytes) +-- Inspect buffer pool SHOW VARIABLES LIKE 'innodb_buffer_pool_size'; DROP TABLE IF EXISTS big_students; @@ -116,51 +116,37 @@ CREATE TABLE evict_buffer ( blobdata TEXT ) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; --- How many total rows you want (adjust upward if buffer pool is large) -SET @rows_big := 800000; -- compressed table target -SET @rows_evict := 600000; -- eviction table target +-- Target row counts (increase if buffer pool is large) +SET @rows_big := 800000; -- compressed table +SET @rows_evict := 600000; -- eviction table +SET @chunk_size := 50000; -- rows per batch insert --- We will insert in CHUNKS of 50,000 rows to avoid huge single INSERT -SET @chunk_size := 50000; +-- Build helper numbers tables WITHOUT ambiguous column names +DROP TEMPORARY TABLE IF EXISTS nums10; +CREATE TEMPORARY TABLE nums10 (d TINYINT UNSIGNED PRIMARY KEY); +INSERT INTO nums10(d) VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); --- Helper: make small number tables (0..99) and (0..499) for expansion +-- nums100: 0..99 DROP TEMPORARY TABLE IF EXISTS nums100; CREATE TEMPORARY TABLE nums100 (n INT PRIMARY KEY); INSERT INTO nums100(n) -SELECT seq FROM ( - SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 -) d1, -( - SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 -) d2; -- 10 x 10 = 100 +SELECT t1.d*10 + t2.d +FROM nums10 t1 +CROSS JOIN nums10 t2 +ORDER BY 1; +-- nums500: 0..499 (use three digits: a*100 + b*10 + c) DROP TEMPORARY TABLE IF EXISTS nums500; CREATE TEMPORARY TABLE nums500 (n INT PRIMARY KEY); INSERT INTO nums500(n) -SELECT seq FROM ( - SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 -) d1, -( - SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 -) d2, -( - SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION 
ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 -) d3, -( - SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 -) d4, -( - SELECT 0 AS seq UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 - UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9 -) d5; -- 10^5 = 100000 (we will LIMIT to 500 per 100 => 50000 rows per chunk) - --- Procedure to load big_students in chunks +SELECT a.d*100 + b.d*10 + c.d +FROM nums10 a +CROSS JOIN nums10 b +CROSS JOIN nums10 c +WHERE a.d*100 + b.d*10 + c.d < 500 +ORDER BY 1; + +-- Procedure: fill compressed table in chunks DELIMITER $$ CREATE PROCEDURE fill_big_students(IN total INT, IN chunk INT) BEGIN @@ -168,22 +154,19 @@ BEGIN WHILE inserted < total DO INSERT INTO big_students (pad1, pad2, notes, filler) SELECT - CONCAT('P1_', seq + inserted) AS pad1, - CONCAT('P2_', seq + inserted) AS pad2, + CONCAT('P1_', inserted + (a.n*500) + b.n) AS pad1, + CONCAT('P2_', inserted + (a.n*500) + b.n) AS pad2, RPAD('COMPRESSIBLE_', 1200, 'COMPRESSIBLE_') AS notes, RPAD('FILL', 800, 'FILL') AS filler - FROM ( - SELECT (a.n * 500) + b.n AS seq - FROM nums100 a - JOIN nums500 b ON b.n < 500 -- ensures 100 * 500 = 50,000 rows - ) gen + FROM nums100 a + JOIN nums500 b -- 100 * 500 = 50,000 rows per batch LIMIT chunk; SET inserted = inserted + chunk; END WHILE; END$$ DELIMITER ; --- Procedure to load evict_buffer in chunks (less compressible data) +-- Procedure: fill eviction table in chunks (less compressible) DELIMITER $$ CREATE PROCEDURE fill_evict_buffer(IN total INT, IN chunk INT) BEGIN @@ -191,13 +174,10 @@ BEGIN WHILE inserted < total DO INSERT INTO evict_buffer (junk, blobdata) SELECT - CONCAT('J', seq + inserted), + CONCAT('J', inserted + (a.n*500) + b.n), CONCAT(MD5(RAND()), '_', MD5(RAND()), '_', RPAD(MD5(RAND()), 300, 'Z')) - FROM ( - SELECT (a.n * 500) + b.n AS seq - FROM nums100 a - JOIN nums500 b ON b.n < 500 - ) gen + FROM nums100 a + JOIN nums500 b LIMIT chunk; SET inserted = inserted + chunk; END WHILE; @@ -208,15 +188,15 @@ DELIMITER ; CALL fill_big_students(@rows_big, @chunk_size); CALL fill_evict_buffer(@rows_evict, @chunk_size); --- Verify formats +-- Verify compressed row format SHOW TABLE STATUS LIKE 'big_students'\G SHOW TABLE STATUS LIKE 'evict_buffer'\G --- Initial compression counters +-- Baseline counters SELECT PAGE_SIZE, COMPRESS_OPS, COMPRESS_TIME, UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; --- Churn procedure (interleaved access) +-- Churn procedure DELIMITER $$ CREATE PROCEDURE churn(IN loops INT) BEGIN @@ -224,22 +204,26 @@ BEGIN DECLARE rstart INT; DECLARE dummy BIGINT; WHILE i < loops DO + -- Random range read on compressed table SET rstart = FLOOR(RAND() * @rows_big) + 1; SELECT SQL_NO_CACHE SUM(id) INTO dummy FROM big_students WHERE id BETWEEN rstart AND rstart + 150; + -- Eviction range read on dynamic table SET rstart = FLOOR(RAND() * @rows_evict) + 1; SELECT SQL_NO_CACHE COUNT(*) INTO dummy FROM evict_buffer WHERE id BETWEEN rstart AND rstart + 3000; + -- Occasional larger slice IF (i % 40 = 0) THEN SELECT SQL_NO_CACHE AVG(id) INTO dummy FROM evict_buffer WHERE id BETWEEN rstart AND rstart + 30000; END IF; + -- Update chunk on compressed table (forces page access/rewrite) IF (i % 25 = 0) THEN UPDATE big_students SET filler = CONCAT(filler, 'X') @@ -251,7 +235,7 @@ BEGIN 
END$$ DELIMITER ; --- Run multiple passes (increase loops if needed) +-- Run churn passes (increase loops if needed) CALL churn(3000); CALL churn(3000); CALL churn(4000); @@ -260,7 +244,7 @@ CALL churn(4000); SELECT PAGE_SIZE, COMPRESS_OPS, COMPRESS_TIME, UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; --- Optional: FORCE rebuild to create fresh compressed pages and churn again +-- Optional rebuild & extra churn -- ALTER TABLE big_students FORCE; -- CALL churn(4000); -- SELECT UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; From ad927b3b7be19dce279d2af96bd37949e0536ba6 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Tue, 25 Nov 2025 08:07:46 +0100 Subject: [PATCH 33/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 99 +++++++++++++++++++------------------- 1 file changed, 49 insertions(+), 50 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index a8ac3839..879f948c 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -94,13 +94,17 @@ DELETE FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'; -- ======================================== --- AGGRESSIVE COMPRESSION METRIC CHURN (Chunked, Non-CTE) +-- AGGRESSIVE COMPRESSION METRIC CHURN (Stable Version) -- ======================================== --- Inspect buffer pool +-- 0. (Optional) Verify buffer pool size; use it to size tables SHOW VARIABLES LIKE 'innodb_buffer_pool_size'; +-- 1. Drop if exist DROP TABLE IF EXISTS big_students; +DROP TABLE IF EXISTS evict_buffer; + +-- 2. Create large compressed table CREATE TABLE big_students ( id INT AUTO_INCREMENT PRIMARY KEY, pad1 VARCHAR(100), @@ -109,44 +113,38 @@ CREATE TABLE big_students ( filler TEXT ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; -DROP TABLE IF EXISTS evict_buffer; +-- 3. Create large dynamic (uncompressed) eviction table CREATE TABLE evict_buffer ( id INT AUTO_INCREMENT PRIMARY KEY, junk VARCHAR(100), blobdata TEXT ) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; --- Target row counts (increase if buffer pool is large) -SET @rows_big := 800000; -- compressed table -SET @rows_evict := 600000; -- eviction table -SET @chunk_size := 50000; -- rows per batch insert +-- 4. Configuration knobs (adjust as needed) +SET @rows_big := 800000; -- target rows in compressed table +SET @rows_evict := 600000; -- target rows in eviction table +SET @chunk_size := 50000; -- rows per batch insert (must divide cleanly into generation set: 100*500=50,000) +SET @note_pad := 1200; -- length of compressible notes +SET @fill_pad := 800; -- length of filler column --- Build helper numbers tables WITHOUT ambiguous column names -DROP TEMPORARY TABLE IF EXISTS nums10; -CREATE TEMPORARY TABLE nums10 (d TINYINT UNSIGNED PRIMARY KEY); -INSERT INTO nums10(d) VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); - --- nums100: 0..99 +-- 5. 
Helper number tables (nums100: 0..99, nums500: 0..499) DROP TEMPORARY TABLE IF EXISTS nums100; -CREATE TEMPORARY TABLE nums100 (n INT PRIMARY KEY); -INSERT INTO nums100(n) -SELECT t1.d*10 + t2.d -FROM nums10 t1 -CROSS JOIN nums10 t2 -ORDER BY 1; - --- nums500: 0..499 (use three digits: a*100 + b*10 + c) +CREATE TEMPORARY TABLE nums100 (n INT PRIMARY KEY) ENGINE=Memory; +SET @i := 0; +WHILE @i < 100 DO + INSERT INTO nums100 VALUES (@i); + SET @i := @i + 1; +END WHILE; + DROP TEMPORARY TABLE IF EXISTS nums500; -CREATE TEMPORARY TABLE nums500 (n INT PRIMARY KEY); -INSERT INTO nums500(n) -SELECT a.d*100 + b.d*10 + c.d -FROM nums10 a -CROSS JOIN nums10 b -CROSS JOIN nums10 c -WHERE a.d*100 + b.d*10 + c.d < 500 -ORDER BY 1; - --- Procedure: fill compressed table in chunks +CREATE TEMPORARY TABLE nums500 (n INT PRIMARY KEY) ENGINE=Memory; +SET @i := 0; +WHILE @i < 500 DO + INSERT INTO nums500 VALUES (@i); + SET @i := @i + 1; +END WHILE; + +-- 6. Procedures for chunked loading (no ambiguous aliases) DELIMITER $$ CREATE PROCEDURE fill_big_students(IN total INT, IN chunk INT) BEGIN @@ -154,27 +152,24 @@ BEGIN WHILE inserted < total DO INSERT INTO big_students (pad1, pad2, notes, filler) SELECT - CONCAT('P1_', inserted + (a.n*500) + b.n) AS pad1, - CONCAT('P2_', inserted + (a.n*500) + b.n) AS pad2, - RPAD('COMPRESSIBLE_', 1200, 'COMPRESSIBLE_') AS notes, - RPAD('FILL', 800, 'FILL') AS filler + CONCAT('P1_', inserted + a.n * 500 + b.n), + CONCAT('P2_', inserted + a.n * 500 + b.n), + RPAD('COMPRESSIBLE_', @note_pad, 'COMPRESSIBLE_'), + RPAD('FILL', @fill_pad, 'FILL') FROM nums100 a - JOIN nums500 b -- 100 * 500 = 50,000 rows per batch + JOIN nums500 b LIMIT chunk; SET inserted = inserted + chunk; END WHILE; END$$ -DELIMITER ; --- Procedure: fill eviction table in chunks (less compressible) -DELIMITER $$ CREATE PROCEDURE fill_evict_buffer(IN total INT, IN chunk INT) BEGIN DECLARE inserted INT DEFAULT 0; WHILE inserted < total DO INSERT INTO evict_buffer (junk, blobdata) SELECT - CONCAT('J', inserted + (a.n*500) + b.n), + CONCAT('J', inserted + a.n * 500 + b.n), CONCAT(MD5(RAND()), '_', MD5(RAND()), '_', RPAD(MD5(RAND()), 300, 'Z')) FROM nums100 a JOIN nums500 b @@ -184,19 +179,19 @@ BEGIN END$$ DELIMITER ; --- Execute loaders +-- 7. Execute loaders CALL fill_big_students(@rows_big, @chunk_size); CALL fill_evict_buffer(@rows_evict, @chunk_size); --- Verify compressed row format +-- 8. Verify row formats SHOW TABLE STATUS LIKE 'big_students'\G SHOW TABLE STATUS LIKE 'evict_buffer'\G --- Baseline counters +-- 9. Baseline compression counters SELECT PAGE_SIZE, COMPRESS_OPS, COMPRESS_TIME, UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; --- Churn procedure +-- 10. 
Churn procedure (interleaved reads/writes) DELIMITER $$ CREATE PROCEDURE churn(IN loops INT) BEGIN @@ -210,20 +205,20 @@ BEGIN FROM big_students WHERE id BETWEEN rstart AND rstart + 150; - -- Eviction range read on dynamic table + -- Eviction range read SET rstart = FLOOR(RAND() * @rows_evict) + 1; SELECT SQL_NO_CACHE COUNT(*) INTO dummy FROM evict_buffer WHERE id BETWEEN rstart AND rstart + 3000; - -- Occasional larger slice + -- Larger slice to push more buffer churn IF (i % 40 = 0) THEN SELECT SQL_NO_CACHE AVG(id) INTO dummy FROM evict_buffer WHERE id BETWEEN rstart AND rstart + 30000; END IF; - -- Update chunk on compressed table (forces page access/rewrite) + -- Update segment in compressed table (write + possible read) IF (i % 25 = 0) THEN UPDATE big_students SET filler = CONCAT(filler, 'X') @@ -235,16 +230,20 @@ BEGIN END$$ DELIMITER ; --- Run churn passes (increase loops if needed) +-- 11. Run churn passes (raise loops if needed) CALL churn(3000); CALL churn(3000); CALL churn(4000); --- Check counters again +-- 12. Re-check counters SELECT PAGE_SIZE, COMPRESS_OPS, COMPRESS_TIME, UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; --- Optional rebuild & extra churn +-- 13. Optional: Rebuild & churn again to create fresh compressed pages -- ALTER TABLE big_students FORCE; -- CALL churn(4000); -- SELECT UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; + +-- 14. Optional reset (for repeated experiments) +-- SELECT * FROM information_schema.INNODB_CMP_RESET; +-- TRUNCATE TABLE information_schema.INNODB_CMP_RESET; From cd47227ee79faa1e9be393f0046e0d6c61dae60d Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Tue, 25 Nov 2025 08:21:30 +0100 Subject: [PATCH 34/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 179 +++++-------------------------------- 1 file changed, 24 insertions(+), 155 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 879f948c..36e0b4d8 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -83,6 +83,30 @@ UPDATE classes SET teacher = 'Ms. 
Carter' WHERE name = 'History'; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; +INSERT INTO students SELECT * FROM students; + -- ======================================== -- DELETE DATA -- ======================================== @@ -92,158 +116,3 @@ WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Alice' A DELETE FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'; - --- ======================================== --- AGGRESSIVE COMPRESSION METRIC CHURN (Stable Version) --- ======================================== - --- 0. (Optional) Verify buffer pool size; use it to size tables -SHOW VARIABLES LIKE 'innodb_buffer_pool_size'; - --- 1. Drop if exist -DROP TABLE IF EXISTS big_students; -DROP TABLE IF EXISTS evict_buffer; - --- 2. Create large compressed table -CREATE TABLE big_students ( - id INT AUTO_INCREMENT PRIMARY KEY, - pad1 VARCHAR(100), - pad2 VARCHAR(100), - notes TEXT, - filler TEXT -) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; - --- 3. Create large dynamic (uncompressed) eviction table -CREATE TABLE evict_buffer ( - id INT AUTO_INCREMENT PRIMARY KEY, - junk VARCHAR(100), - blobdata TEXT -) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; - --- 4. Configuration knobs (adjust as needed) -SET @rows_big := 800000; -- target rows in compressed table -SET @rows_evict := 600000; -- target rows in eviction table -SET @chunk_size := 50000; -- rows per batch insert (must divide cleanly into generation set: 100*500=50,000) -SET @note_pad := 1200; -- length of compressible notes -SET @fill_pad := 800; -- length of filler column - --- 5. Helper number tables (nums100: 0..99, nums500: 0..499) -DROP TEMPORARY TABLE IF EXISTS nums100; -CREATE TEMPORARY TABLE nums100 (n INT PRIMARY KEY) ENGINE=Memory; -SET @i := 0; -WHILE @i < 100 DO - INSERT INTO nums100 VALUES (@i); - SET @i := @i + 1; -END WHILE; - -DROP TEMPORARY TABLE IF EXISTS nums500; -CREATE TEMPORARY TABLE nums500 (n INT PRIMARY KEY) ENGINE=Memory; -SET @i := 0; -WHILE @i < 500 DO - INSERT INTO nums500 VALUES (@i); - SET @i := @i + 1; -END WHILE; - --- 6. 
Procedures for chunked loading (no ambiguous aliases) -DELIMITER $$ -CREATE PROCEDURE fill_big_students(IN total INT, IN chunk INT) -BEGIN - DECLARE inserted INT DEFAULT 0; - WHILE inserted < total DO - INSERT INTO big_students (pad1, pad2, notes, filler) - SELECT - CONCAT('P1_', inserted + a.n * 500 + b.n), - CONCAT('P2_', inserted + a.n * 500 + b.n), - RPAD('COMPRESSIBLE_', @note_pad, 'COMPRESSIBLE_'), - RPAD('FILL', @fill_pad, 'FILL') - FROM nums100 a - JOIN nums500 b - LIMIT chunk; - SET inserted = inserted + chunk; - END WHILE; -END$$ - -CREATE PROCEDURE fill_evict_buffer(IN total INT, IN chunk INT) -BEGIN - DECLARE inserted INT DEFAULT 0; - WHILE inserted < total DO - INSERT INTO evict_buffer (junk, blobdata) - SELECT - CONCAT('J', inserted + a.n * 500 + b.n), - CONCAT(MD5(RAND()), '_', MD5(RAND()), '_', RPAD(MD5(RAND()), 300, 'Z')) - FROM nums100 a - JOIN nums500 b - LIMIT chunk; - SET inserted = inserted + chunk; - END WHILE; -END$$ -DELIMITER ; - --- 7. Execute loaders -CALL fill_big_students(@rows_big, @chunk_size); -CALL fill_evict_buffer(@rows_evict, @chunk_size); - --- 8. Verify row formats -SHOW TABLE STATUS LIKE 'big_students'\G -SHOW TABLE STATUS LIKE 'evict_buffer'\G - --- 9. Baseline compression counters -SELECT PAGE_SIZE, COMPRESS_OPS, COMPRESS_TIME, UNCOMPRESS_OPS, UNCOMPRESS_TIME -FROM information_schema.INNODB_CMP; - --- 10. Churn procedure (interleaved reads/writes) -DELIMITER $$ -CREATE PROCEDURE churn(IN loops INT) -BEGIN - DECLARE i INT DEFAULT 0; - DECLARE rstart INT; - DECLARE dummy BIGINT; - WHILE i < loops DO - -- Random range read on compressed table - SET rstart = FLOOR(RAND() * @rows_big) + 1; - SELECT SQL_NO_CACHE SUM(id) INTO dummy - FROM big_students - WHERE id BETWEEN rstart AND rstart + 150; - - -- Eviction range read - SET rstart = FLOOR(RAND() * @rows_evict) + 1; - SELECT SQL_NO_CACHE COUNT(*) INTO dummy - FROM evict_buffer - WHERE id BETWEEN rstart AND rstart + 3000; - - -- Larger slice to push more buffer churn - IF (i % 40 = 0) THEN - SELECT SQL_NO_CACHE AVG(id) INTO dummy - FROM evict_buffer - WHERE id BETWEEN rstart AND rstart + 30000; - END IF; - - -- Update segment in compressed table (write + possible read) - IF (i % 25 = 0) THEN - UPDATE big_students - SET filler = CONCAT(filler, 'X') - WHERE id BETWEEN rstart AND rstart + 180; - END IF; - - SET i = i + 1; - END WHILE; -END$$ -DELIMITER ; - --- 11. Run churn passes (raise loops if needed) -CALL churn(3000); -CALL churn(3000); -CALL churn(4000); - --- 12. Re-check counters -SELECT PAGE_SIZE, COMPRESS_OPS, COMPRESS_TIME, UNCOMPRESS_OPS, UNCOMPRESS_TIME -FROM information_schema.INNODB_CMP; - --- 13. Optional: Rebuild & churn again to create fresh compressed pages --- ALTER TABLE big_students FORCE; --- CALL churn(4000); --- SELECT UNCOMPRESS_OPS, UNCOMPRESS_TIME FROM information_schema.INNODB_CMP; - --- 14. Optional reset (for repeated experiments) --- SELECT * FROM information_schema.INNODB_CMP_RESET; --- TRUNCATE TABLE information_schema.INNODB_CMP_RESET; From 53197e5b0586edde297223ecd986e799b25266f2 Mon Sep 17 00:00:00 2001 From: Peter Sirotnak Date: Tue, 25 Nov 2025 08:34:31 +0100 Subject: [PATCH 35/39] PMM-13992: Bigger test data --- pmm_qa/data/mysql_load.sql | 114 +++++++++++++++++++++++++++++-------- 1 file changed, 90 insertions(+), 24 deletions(-) diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql index 36e0b4d8..aede4fa9 100644 --- a/pmm_qa/data/mysql_load.sql +++ b/pmm_qa/data/mysql_load.sql @@ -83,30 +83,6 @@ UPDATE classes SET teacher = 'Ms. 
Carter' WHERE name = 'History'; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; -INSERT INTO students SELECT * FROM students; - -- ======================================== -- DELETE DATA -- ======================================== @@ -116,3 +92,93 @@ WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Alice' A DELETE FROM students WHERE first_name = 'Alice' AND last_name = 'Smith'; + +-- A compressed table with ~1KB rows +CREATE TABLE students_big ( + id INT AUTO_INCREMENT PRIMARY KEY, + first_name VARCHAR(50), + last_name VARCHAR(50), + birth_date DATE, + bio TEXT, -- larger field to fill pages + notes TEXT, + filler VARBINARY(256), -- binary helps mixed page content + INDEX (last_name), + INDEX (birth_date) +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; + +-- A second table to diversify compression across indexes +CREATE TABLE students_big2 LIKE students_big; +ALTER TABLE students_big2 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; + +-- Optional: smaller row table to vary page shapes +CREATE TABLE students_small ( + id INT AUTO_INCREMENT PRIMARY KEY, + first_name VARCHAR(50), + last_name VARCHAR(50), + birth_date DATE, + INDEX (last_name) +) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; + +-- Seed 200k rows quickly (repeat the INSERT SELECT pattern to reach ~1M if desired) +INSERT INTO students_big (first_name, last_name, birth_date, bio, notes, filler) +SELECT + CONCAT('FN', LPAD(i, 6, '0')), + CONCAT('LN', LPAD(i*13 % 1000000, 6, '0')), + DATE_ADD('1970-01-01', INTERVAL (i*37 % 18628) DAY), + REPEAT('BIO_', 50), + REPEAT('NOTE_', 30), + RANDOM_BYTES(256) +FROM ( + SELECT @row := @row + 1 AS i + FROM (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t0, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t2, + (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4 + UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t3, + (SELECT @row:=0) init +) gen +LIMIT 200000; + +INSERT INTO students_big2 SELECT * FROM students_big; +INSERT INTO students_small (first_name, last_name, birth_date) 
From 79d7a772f8710b6ba6d41f81514f359061054ddc Mon Sep 17 00:00:00 2001
From: Peter Sirotnak
Date: Tue, 25 Nov 2025 08:53:12 +0100
Subject: [PATCH 36/39] PMM-13992: Bigger test data

---
 pmm_qa/data/mysql_load.sql | 1 -
 1 file changed, 1 deletion(-)

diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql
index aede4fa9..576739cb 100644
--- a/pmm_qa/data/mysql_load.sql
+++ b/pmm_qa/data/mysql_load.sql
@@ -181,4 +181,3 @@ BEGIN
         OPTIMIZE TABLE students_big2;
     END IF;
 END;
-
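Editor's note: since this event is the only thing keeping load on the compressed tables, it is worth verifying that it actually fires. SHOW EVENTS only reports existence; information_schema.EVENTS also exposes the last execution time. A sketch:

-- Sketch: confirm the scheduler is running and the event keeps firing.
SHOW VARIABLES LIKE 'event_scheduler';
SELECT EVENT_NAME, STATUS, LAST_EXECUTED
FROM information_schema.EVENTS
WHERE EVENT_NAME = 'ev_compress_load';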
From 73aa4c18d7ba9a558b8c7c2a78de9fcc240a94ca Mon Sep 17 00:00:00 2001
From: Peter Sirotnak
Date: Tue, 25 Nov 2025 09:02:04 +0100
Subject: [PATCH 37/39] PMM-13992: Bigger test data

---
 pmm_qa/data/mysql_load.sql | 72 ++++++++++++++++++++++++++------------
 1 file changed, 49 insertions(+), 23 deletions(-)

diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql
index 576739cb..3937125a 100644
--- a/pmm_qa/data/mysql_load.sql
+++ b/pmm_qa/data/mysql_load.sql
@@ -93,25 +93,26 @@ WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Alice' A
 DELETE FROM students
 WHERE first_name = 'Alice' AND last_name = 'Smith';
 
--- A compressed table with ~1KB rows
-CREATE TABLE students_big (
+-- ========================================
+-- BIG COMPRESSED TABLES FOR LOAD GENERATION
+-- ========================================
+
+CREATE TABLE IF NOT EXISTS students_big (
     id INT AUTO_INCREMENT PRIMARY KEY,
     first_name VARCHAR(50),
     last_name VARCHAR(50),
     birth_date DATE,
-    bio TEXT, -- larger field to fill pages
+    bio TEXT,
     notes TEXT,
-    filler VARBINARY(256), -- binary helps mixed page content
+    filler VARBINARY(256),
     INDEX (last_name),
     INDEX (birth_date)
 ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
 
--- A second table to diversify compression across indexes
-CREATE TABLE students_big2 LIKE students_big;
+CREATE TABLE IF NOT EXISTS students_big2 LIKE students_big;
 ALTER TABLE students_big2 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
 
--- Optional: smaller row table to vary page shapes
-CREATE TABLE students_small (
+CREATE TABLE IF NOT EXISTS students_small (
     id INT AUTO_INCREMENT PRIMARY KEY,
     first_name VARCHAR(50),
     last_name VARCHAR(50),
@@ -119,7 +120,7 @@ CREATE TABLE students_small (
     INDEX (last_name)
 ) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
 
--- Seed 200k rows quickly (repeat the INSERT SELECT pattern to reach ~1M if desired)
+-- Seed 200k rows (run once). If already done, skip.
 INSERT INTO students_big (first_name, last_name, birth_date, bio, notes, filler)
 SELECT
     CONCAT('FN', LPAD(i, 6, '0')),
@@ -142,42 +143,67 @@ FROM (
 ) gen
 LIMIT 200000;
 
-INSERT INTO students_big2 SELECT * FROM students_big;
+INSERT INTO students_big2
+SELECT * FROM students_big;
+
 INSERT INTO students_small (first_name, last_name, birth_date)
 SELECT first_name, last_name, birth_date FROM students_big LIMIT 200000;
 
--- Enable events
+-- ========================================
+-- EVENT TO GENERATE CONTINUOUS COMPRESSION WORK
+-- ========================================
+
+-- Make sure the event scheduler is on:
 SET GLOBAL event_scheduler = ON;
 
--- Create an event that runs every 5 seconds
-DROP EVENT IF EXISTS ev_compress_load;
+-- Change delimiter to allow compound statement
+DELIMITER //
+
+DROP EVENT IF EXISTS ev_compress_load//
+
 CREATE EVENT ev_compress_load
 ON SCHEDULE EVERY 5 SECOND
+ON COMPLETION PRESERVE
 DO
 BEGIN
-    -- Inserts: add ~2k rows
+    -- Inserts (~2k rows per run)
     INSERT INTO students_big (first_name, last_name, birth_date, bio, notes, filler)
     SELECT
         CONCAT('FNX', UUID()),
         CONCAT('LNX', UUID()),
         DATE_ADD('1970-01-01', INTERVAL FLOOR(RAND()*18628) DAY),
-        REPEAT('BIO_', FLOOR(20+RAND()*60)),
-        REPEAT('NOTE_', FLOOR(10+RAND()*40)),
+        REPEAT('BIO_', FLOOR(20 + RAND()*60)),
+        REPEAT('NOTE_', FLOOR(10 + RAND()*40)),
         RANDOM_BYTES(256)
-    FROM information_schema.columns LIMIT 2000; -- cheap row generator
+    FROM information_schema.columns
+    LIMIT 2000;
 
-    -- Updates: touch rows across pages, triggers page rewrites
+    -- Updates (touch pages)
     UPDATE students_big
-    SET bio = CONCAT(bio, 'U'), notes = CONCAT(notes, 'U')
+    SET bio = CONCAT(bio, 'U'),
+        notes = CONCAT(notes, 'U')
     WHERE id % 37 = 0
     LIMIT 2000;
 
-    -- Deletes: free space and cause merges under compression
-    DELETE FROM students_big WHERE id % 101 = 0 LIMIT 1000;
+    -- Deletes (free space for merges)
+    DELETE FROM students_big
+    WHERE id % 101 = 0
+    LIMIT 1000;
 
-    -- Periodically force re-compression (lightweight, but adds activity)
+    -- Periodic re-compression
     IF (UNIX_TIMESTAMP() % 60) < 5 THEN
         OPTIMIZE TABLE students_big;
         OPTIMIZE TABLE students_big2;
     END IF;
-END;
+END//
+
+DELIMITER ;
+
+-- ========================================
+-- VERIFICATION QUERIES
+-- ========================================
+-- Check that event is created and enabled
+SHOW EVENTS LIKE 'ev_compress_load';
+
+-- Check a few rows
+SELECT COUNT(*) FROM students_big;
\ No newline at end of file
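Editor's note: the verification queries above count rows but say nothing about how well the KEY_BLOCK_SIZE=8 and KEY_BLOCK_SIZE=4 tables actually compress. InnoDB can attribute the counters per index when innodb_cmp_per_index_enabled is on (off by default, and it adds overhead). A sketch:

-- Sketch: per-index compression stats for the tables created above.
SET GLOBAL innodb_cmp_per_index_enabled = ON;
SELECT DATABASE_NAME, TABLE_NAME, INDEX_NAME,
       COMPRESS_OPS, COMPRESS_OPS_OK, UNCOMPRESS_OPS
FROM information_schema.INNODB_CMP_PER_INDEX
WHERE TABLE_NAME LIKE 'students_big%';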
From de1b240fff02614528a254c4ff3fc2314468d1b5 Mon Sep 17 00:00:00 2001
From: Peter Sirotnak
Date: Tue, 25 Nov 2025 10:18:05 +0100
Subject: [PATCH 38/39] PMM-13992: add mysqldump to path

---
 pmm_qa/mysql/tasks/prepare_install_mysql.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pmm_qa/mysql/tasks/prepare_install_mysql.yml b/pmm_qa/mysql/tasks/prepare_install_mysql.yml
index 96366689..7867f965 100644
--- a/pmm_qa/mysql/tasks/prepare_install_mysql.yml
+++ b/pmm_qa/mysql/tasks/prepare_install_mysql.yml
@@ -82,6 +82,7 @@
       docker exec {{ container_prefix }}{{ index }} chmod +x /etc/init.d/mysql
       docker exec {{ container_prefix }}{{ index }} sh -c "echo 'export PATH=/usr/local/mysql/bin:\$PATH' >> /etc/profile"
       docker exec {{ container_prefix }}{{ index }} ln -s /usr/local/mysql/bin/mysql /usr/bin/mysql
+      docker exec {{ container_prefix }}{{ index }} ln -s /usr/local/mysql/bin/mysqldump /usr/bin/mysqldump
       docker exec {{ container_prefix }}{{ index }} bash -c 'source ~/.bashrc'
     register: mysql_57_root_password
     when: mysql_version | replace('_', '') | int == 57
From a39402e31af334c2cc5b72d845f248c2c0d63ad9 Mon Sep 17 00:00:00 2001
From: Peter Sirotnak
Date: Wed, 26 Nov 2025 11:30:19 +0100
Subject: [PATCH 39/39] PMM-13992: Remove data generation

---
 pmm_qa/data/mysql_load.sql | 115 -------------------------------------
 1 file changed, 115 deletions(-)

diff --git a/pmm_qa/data/mysql_load.sql b/pmm_qa/data/mysql_load.sql
index 3937125a..a98191a0 100644
--- a/pmm_qa/data/mysql_load.sql
+++ b/pmm_qa/data/mysql_load.sql
@@ -92,118 +92,3 @@ WHERE student_id = (SELECT student_id FROM students WHERE first_name = 'Alice' A
 DELETE FROM students
 WHERE first_name = 'Alice' AND last_name = 'Smith';
-
--- ========================================
--- BIG COMPRESSED TABLES FOR LOAD GENERATION
--- ========================================
-
-CREATE TABLE IF NOT EXISTS students_big (
-    id INT AUTO_INCREMENT PRIMARY KEY,
-    first_name VARCHAR(50),
-    last_name VARCHAR(50),
-    birth_date DATE,
-    bio TEXT,
-    notes TEXT,
-    filler VARBINARY(256),
-    INDEX (last_name),
-    INDEX (birth_date)
-) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
-
-CREATE TABLE IF NOT EXISTS students_big2 LIKE students_big;
-ALTER TABLE students_big2 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
-
-CREATE TABLE IF NOT EXISTS students_small (
-    id INT AUTO_INCREMENT PRIMARY KEY,
-    first_name VARCHAR(50),
-    last_name VARCHAR(50),
-    birth_date DATE,
-    INDEX (last_name)
-) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
-
--- Seed 200k rows (run once). If already done, skip.
-INSERT INTO students_big (first_name, last_name, birth_date, bio, notes, filler)
-SELECT
-    CONCAT('FN', LPAD(i, 6, '0')),
-    CONCAT('LN', LPAD(i*13 % 1000000, 6, '0')),
-    DATE_ADD('1970-01-01', INTERVAL (i*37 % 18628) DAY),
-    REPEAT('BIO_', 50),
-    REPEAT('NOTE_', 30),
-    RANDOM_BYTES(256)
-FROM (
-    SELECT @row := @row + 1 AS i
-    FROM (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4
-          UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t0,
-         (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4
-          UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t1,
-         (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4
-          UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t2,
-         (SELECT 0 UNION ALL SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL SELECT 4
-          UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9) t3,
-         (SELECT @row:=0) init
-) gen
-LIMIT 200000;
-
-INSERT INTO students_big2
-SELECT * FROM students_big;
-
-INSERT INTO students_small (first_name, last_name, birth_date)
-SELECT first_name, last_name, birth_date FROM students_big LIMIT 200000;
-
--- ========================================
--- EVENT TO GENERATE CONTINUOUS COMPRESSION WORK
--- ========================================
-
--- Make sure the event scheduler is on:
-SET GLOBAL event_scheduler = ON;
-
--- Change delimiter to allow compound statement
-DELIMITER //
-
-DROP EVENT IF EXISTS ev_compress_load//
-
-CREATE EVENT ev_compress_load
-ON SCHEDULE EVERY 5 SECOND
-ON COMPLETION PRESERVE
-DO
-BEGIN
-    -- Inserts (~2k rows per run)
-    INSERT INTO students_big (first_name, last_name, birth_date, bio, notes, filler)
-    SELECT
-        CONCAT('FNX', UUID()),
-        CONCAT('LNX', UUID()),
-        DATE_ADD('1970-01-01', INTERVAL FLOOR(RAND()*18628) DAY),
-        REPEAT('BIO_', FLOOR(20 + RAND()*60)),
-        REPEAT('NOTE_', FLOOR(10 + RAND()*40)),
-        RANDOM_BYTES(256)
-    FROM information_schema.columns
-    LIMIT 2000;
-
-    -- Updates (touch pages)
-    UPDATE students_big
-    SET bio = CONCAT(bio, 'U'),
-        notes = CONCAT(notes, 'U')
-    WHERE id % 37 = 0
-    LIMIT 2000;
-
-    -- Deletes (free space for merges)
-    DELETE FROM students_big
-    WHERE id % 101 = 0
-    LIMIT 1000;
-
-    -- Periodic re-compression
-    IF (UNIX_TIMESTAMP() % 60) < 5 THEN
-        OPTIMIZE TABLE students_big;
-        OPTIMIZE TABLE students_big2;
-    END IF;
-END//
-
-DELIMITER ;
-
--- ========================================
--- VERIFICATION QUERIES
--- ========================================
--- Check that event is created and enabled
-SHOW EVENTS LIKE 'ev_compress_load';
-
--- Check a few rows
-SELECT COUNT(*) FROM students_big;
\ No newline at end of file
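Editor's note: this removal only changes what mysql_load.sql does on fresh setups. An instance that already sourced an earlier revision keeps ev_compress_load firing every 5 seconds against the students_big tables. A cleanup sketch for such environments:

-- Sketch: drop the load-generation artifacts left by earlier revisions.
DROP EVENT IF EXISTS ev_compress_load;
DROP TABLE IF EXISTS students_big, students_big2, students_small;
-- Only disable the scheduler if nothing else depends on it:
-- SET GLOBAL event_scheduler = OFF;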