169 changes: 98 additions & 71 deletions test/functional/tests/data_integrity/test_data_integrity_5d.py
@@ -16,8 +16,13 @@
from api.cas.cache_config import CacheMode
from api.cas.ioclass_config import IoClass
from core.test_run import TestRun
from test_tools.fs_tools import Filesystem, create_directory, check_if_directory_exists, \
read_file, crc32sum
from test_tools.fs_tools import (
Filesystem,
create_directory,
check_if_directory_exists,
read_file,
crc32sum,
)
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine, VerifyMethod
from storage_devices.disk import DiskType, DiskTypeSet, DiskTypeLowerThan
@@ -53,11 +58,13 @@ def test_data_integrity_5d_with_io_classification(filesystems):
runtime = datetime.timedelta(days=5)

with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_devices = [TestRun.disks['core1'],
TestRun.disks['core2'],
TestRun.disks['core3'],
TestRun.disks['core4']]
cache_device = TestRun.disks["cache"]
core_devices = [
TestRun.disks["core1"],
TestRun.disks["core2"],
TestRun.disks["core3"],
TestRun.disks["core4"],
]

cache_device.create_partitions([Size(50, Unit.GibiByte)] * len(core_devices))

@@ -76,8 +83,7 @@ def test_data_integrity_5d_with_io_classification(filesystems):

with TestRun.step("Add one core to each cache"):
cores = [
cache.add_core(core_dev=core_part)
for cache, core_part in zip(caches, core_partitions)
cache.add_core(core_dev=core_part) for cache, core_part in zip(caches, core_partitions)
]

with TestRun.step("Load default I/O class config for each cache"):
@@ -90,41 +96,52 @@ def test_data_integrity_5d_with_io_classification(filesystems):

with TestRun.step("Mount cached volumes"):
for core in cores:
mount_point = core.path.replace('/dev/', '/mnt/')
if not check_if_directory_exists(mount_point):
create_directory(mount_point)
core.mount(mount_point)
mount_point = core.path.replace("/dev/", "/mnt/")
if not check_if_directory_exists(path=mount_point):
create_directory(path=mount_point)
core.mount(mount_point=mount_point)
sync()

with TestRun.step("Prepare fio workload config"):
with TestRun.step("Prepare I/O workload config"):
template_io_classes = IoClass.csv_to_list(read_file(template_config_path))
config_max_file_sizes = [
int(re.search(r'\d+', io_class.rule).group())
for io_class in template_io_classes if io_class.rule.startswith("file_size:le")
int(re.search(r"\d+", io_class.rule).group())
for io_class in template_io_classes
if io_class.rule.startswith("file_size:le")
]
config_max_file_sizes.append(config_max_file_sizes[-1] * 2)
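        # Illustration only (the "file_size:le:<bytes>" rule format is assumed from the
        # startswith() filter above): for template rules like "file_size:le:4096" and
        # "file_size:le:1048576" the comprehension yields [4096, 1048576], and the
        # append() adds 2097152 as an extra bucket for files above the largest threshold.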
io_class_section_size = Size(
int(core_size.get_value(Unit.GibiByte) / len(config_max_file_sizes)),
Unit.GibiByte
int(core_size.get_value(Unit.GibiByte) / len(config_max_file_sizes)), Unit.GibiByte
)

fio = Fio()
fio_run = fio.create_command()
fio.base_cmd_parameters.set_param(
'alloc-size', int(Size(1, Unit.GiB).get_value(Unit.KiB))
        '''
        Fio has a bug when verifying randrw workloads: switching to randrw makes
        fio fail with "verify: bad header rand_seed ..., wanted ...".
        If randrw is needed in the future, check the status of bug
        https://github.com/axboe/fio/issues/1049 first.
        '''
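        # Hypothetical sketch, not part of this change: if the fio issue above is ever
        # resolved, a mixed workload could presumably be requested by replacing the
        # .read_write(ReadWrite.randwrite) call below with
        #     .read_write(ReadWrite.randrw)
        # while keeping the same verification settings.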

fio_run = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.direct()
.time_based()
.do_verify()
.verify(VerifyMethod.md5)
.verify_dump()
.run_time(runtime)
.read_write(ReadWrite.randwrite)
.open_files(1000)
.io_depth(128)
.blocksize_range(
[(Size(512, Unit.Byte).get_value(), Size(128, Unit.KibiByte).get_value())]
)
)

fio_run.io_engine(IoEngine.libaio)
fio_run.direct()
fio_run.time_based()
fio_run.do_verify()
fio_run.verify(VerifyMethod.md5)
fio_run.verify_dump()
fio_run.run_time(runtime)
fio_run.read_write(ReadWrite.randrw)
fio_run.io_depth(128)
fio_run.blocksize_range(
[(Size(512, Unit.Byte).get_value(), Size(128, Unit.KibiByte).get_value())]
fio_run.fio.base_cmd_parameters.set_param(
"alloc-size", int(Size(1, Unit.GibiByte).get_value(Unit.KibiByte))
)
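        # Assumed background: fio's --alloc-size takes a value in KiB, so the expression
        # above requests roughly a 1 GiB internal smalloc pool, presumably so that a long
        # verify run with many open files does not exhaust the default pool.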

for core in cores:
@@ -146,23 +163,25 @@ def test_data_integrity_5d_with_io_classification(filesystems):
for core in cores:
core.unmount()

with TestRun.step("Calculate crc32 for each core"):
core_crc32s = [crc32sum(core.path, timeout=timedelta(hours=4)) for core in cores]
with TestRun.step("Calculate checksum for each core"):
core_crc32s = [crc32sum(file=core.path, timeout=timedelta(hours=4)) for core in cores]

with TestRun.step("Stop caches"):
for cache in caches:
cache.stop()

with TestRun.step("Calculate crc32 for each core"):
dev_crc32s = [crc32sum(dev.path, timeout=timedelta(hours=4)) for dev in core_devices]
with TestRun.step("Calculate checksum for each core device"):
dev_crc32s = [crc32sum(file=dev.path, timeout=timedelta(hours=4)) for dev in core_devices]

with TestRun.step("Compare crc32 sums for cores and core devices"):
for core_crc32, dev_crc32, mode, fs in zip(
core_crc32s, dev_crc32s, cache_modes, filesystems
with TestRun.step("Compare checksum for cores and core devices"):
for core_crc32, dev_crc32, cache_mode, filesystem in zip(
core_crc32s, dev_crc32s, cache_modes, filesystems
):
if core_crc32 != dev_crc32:
TestRun.fail("Crc32 sums of core and core device do not match! "
f"Cache mode: {mode} Filesystem: {fs}")
TestRun.fail(
"Checksum of core and core device do not match!\n"
f"Cache mode: {cache_mode} Filesystem: {filesystem}"
)


@pytest.mark.os_dependent
@@ -186,11 +205,13 @@ def test_data_integrity_5d():
runtime = datetime.timedelta(days=5)

with TestRun.step("Prepare cache and core devices"):
cache_device = TestRun.disks['cache']
core_devices = [TestRun.disks['core1'],
TestRun.disks['core2'],
TestRun.disks['core3'],
TestRun.disks['core4']]
cache_device = TestRun.disks["cache"]
core_devices = [
TestRun.disks["core1"],
TestRun.disks["core2"],
TestRun.disks["core3"],
TestRun.disks["core4"],
]

cache_device.create_partitions([Size(50, Unit.GibiByte)] * len(core_devices))

@@ -202,49 +223,55 @@

with TestRun.step("Start caches, each in different cache mode"):
caches = [
casadm.start_cache(cache_device, cache_mode, force=True)
casadm.start_cache(cache_dev=cache_device, cache_mode=cache_mode, force=True)
for cache_device, cache_mode in zip(cache_device.partitions, cache_modes)
]

with TestRun.step("Add one core to each cache"):
cores = [
casadm.add_core(cache, core_dev=core_device)
casadm.add_core(cache=cache, core_dev=core_device)
for cache, core_device in zip(caches, core_devices)
]

with TestRun.step("Prepare fio workload config"):
fio_run = Fio().create_command()
fio_run.io_engine(IoEngine.libaio)
fio_run.direct()
fio_run.time_based()
fio_run.do_verify()
fio_run.verify(VerifyMethod.md5)
fio_run.verify_dump()
fio_run.run_time(runtime)
fio_run.read_write(ReadWrite.randrw)
fio_run.io_depth(128)
fio_run.blocksize_range(
[(Size(512, Unit.Byte).get_value(), Size(128, Unit.KibiByte).get_value())]
with TestRun.step("Prepare I/O workload config"):
fio_run = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.direct()
.time_based()
.do_verify()
.verify(VerifyMethod.md5)
.verify_dump()
.run_time(runtime)
.read_write(ReadWrite.randrw)
.io_depth(128)
.blocksize_range(
[(Size(512, Unit.Byte).get_value(), Size(128, Unit.KibiByte).get_value())]
)
)

for core in cores:
fio_job = fio_run.add_job()
fio_job.target(core)

with TestRun.step("Run test workload with data verification"):
fio_run.run(fio_timeout=runtime + datetime.timedelta(hours=2))

with TestRun.step("Calculate crc32 for each core"):
core_crc32s = [crc32sum(core.path, timeout=timedelta(hours=4)) for core in cores]
with TestRun.step("Calculate checksum for each core"):
core_crc32s = [crc32sum(file=core.path, timeout=timedelta(hours=4)) for core in cores]

with TestRun.step("Stop caches"):
for cache in caches:
cache.stop()

with TestRun.step("Calculate crc32 for each core"):
dev_crc32s = [crc32sum(dev.path, timeout=timedelta(hours=4)) for dev in core_devices]
with TestRun.step("Calculate checksum for each core device"):
dev_crc32s = [crc32sum(file=dev.path, timeout=timedelta(hours=4)) for dev in core_devices]

with TestRun.step("Compare crc32 sums for cores and core devices"):
for core_crc32, dev_crc32, mode in zip(core_crc32s, dev_crc32s, cache_modes):
with TestRun.step("Compare checksum for cores and core devices"):
for core_crc32, dev_crc32, cache_mode in zip(core_crc32s, dev_crc32s, cache_modes):
if core_crc32 != dev_crc32:
TestRun.fail("Crc32 sums of core and core device do not match! "
f"Cache mode: {mode}")
TestRun.fail(
"Checksum sums of core and core device do not match!\n"
f"Cache mode with wrong checksum: {cache_mode}"
)
64 changes: 41 additions & 23 deletions test/functional/tests/io/trim/test_trim.py
@@ -1,6 +1,6 @@
#
# Copyright(c) 2020-2022 Intel Corporation
# Copyright(c) 2024 Huawei Technologies Co., Ltd.
# Copyright(c) 2024-2025 Huawei Technologies Co., Ltd.
# SPDX-License-Identifier: BSD-3-Clause
#

@@ -36,12 +36,13 @@ def test_trim_start_discard():
- Partition used for cache is discarded.
- Second partition is untouched - written pattern is preserved.
"""

with TestRun.step("Clearing dmesg"):
TestRun.executor.run_expect_success("dmesg -C")

with TestRun.step("Preparing cache device"):
dev = TestRun.disks["cache"]
dev.create_partitions([Size(500, Unit.MebiByte), Size(500, Unit.MebiByte)])
dev.create_partitions([Size(500, Unit.MebiByte)] * 2)
cas_part = dev.partitions[0]
non_cas_part = dev.partitions[1]

Expand Down Expand Up @@ -116,7 +117,7 @@ def test_trim_propagation():
- No data corruption after power failure.
"""

with TestRun.step(f"Create partitions"):
with TestRun.step("Prepare cache and core devices"):
TestRun.disks["ssd1"].create_partitions([Size(43, Unit.MegaByte)])
TestRun.disks["ssd2"].create_partitions([Size(512, Unit.KiloByte)])

@@ -131,36 +132,52 @@
with TestRun.step(f"Disable udev"):
Udev.disable()

with TestRun.step(f"Prepare cache instance in WB with one core"):
with TestRun.step("Start cache in Write-Back mode and add core device"):
cache = casadm.start_cache(cache_dev, CacheMode.WB, force=True)
core = cache.add_core(core_dev)
cache.set_cleaning_policy(CleaningPolicy.nop)

with TestRun.step("Disable cleaning policy and sequential cutoff"):
cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
cache.set_cleaning_policy(CleaningPolicy.nop)

with TestRun.step("Purge cache and reset cache counters"):
cache.purge_cache()
cache.reset_counters()

with TestRun.step(f"Fill exported object with dirty data"):
with TestRun.step("Run I/O to fill exported object with dirty data"):
core_size_4k = core.get_statistics().config_stats.core_size.get_value(Unit.Blocks4096)
core_size_4k = int(core_size_4k)

cas_fio = write_pattern(core.path)
cas_fio.verification_with_pattern("0xdeadbeef")
cas_fio.run()
fio = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.read_write(ReadWrite.write)
.target(core.path)
.direct()
.verification_with_pattern()
)
fio.verification_with_pattern("0xdeadbeef")

dirty_4k = cache.get_statistics().usage_stats.dirty.get_value(Unit.Blocks4096)
fio.run()

with TestRun.step("Check if exported object was filled with dirty data"):
dirty_4k = cache.get_statistics().usage_stats.dirty.get_value(Unit.Blocks4096)
if dirty_4k != core_size_4k:
TestRun.fail(
f"Failed to fill cache. Expected dirty blocks: {core_size_4k}, "
f"actual value {dirty_4k}"
"Failed to fill cache with dirty data\n"
f"Expected dirty blocks: {core_size_4k}\n"
f"Actual value: {dirty_4k}"
)

with TestRun.step(f"Discard 4k of data on exported object"):
TestRun.executor.run_expect_success(f"blkdiscard {core.path} --length 4096 --offset 0")
with TestRun.step("Discard 4k of data on exported object"):
TestRun.executor.run_expect_success(f"blkdiscard {core.path} --length 4096 --offset 4096")
old_occupancy = cache.get_statistics().usage_stats.occupancy.get_value(Unit.Blocks4096)

with TestRun.step("Power cycle"):
with TestRun.step("Power cycle DUT"):
power_control = TestRun.plugin_manager.get_plugin("power_control")
power_control.power_cycle()
power_control.power_cycle(wait_for_connection=True)

with TestRun.step("Disable udev after power cycle"):
Udev.disable()

with TestRun.step("Load cache"):
@@ -170,14 +187,15 @@ def test_trim_propagation():
new_occupancy = cache.get_statistics().usage_stats.occupancy.get_value(Unit.Blocks4096)
if new_occupancy != old_occupancy:
TestRun.LOGGER.error(
f"Expected occupancy after dirty shutdown: {old_occupancy}. "
f"Actuall: {new_occupancy})"
"Wrong number of occupancy blocks after power cycle\n"
f"Expected occupancy after dirty shutdown: {old_occupancy}\n"
f"Actual: {new_occupancy}"
)

with TestRun.step("Verify data after dirty shutdown"):
cas_fio.read_write(ReadWrite.read)
cas_fio.offset(Unit.Blocks4096)
cas_fio.run()
with TestRun.step("Verify data after power cycle"):
fio.read_write(ReadWrite.read)
fio.offset(Unit.Blocks4096)
fio.run()


@pytest.mark.os_dependent