diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index dfa7c8d0ea..4e7bcf38b9 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -12,7 +12,7 @@ jobs: - name: "CLA Assistant" if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target' # Beta Release - uses: cla-assistant/github-action@v2.1.3-beta + uses: cla-assistant/github-action@v2.4.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # the below token should have repo scope and must be manually added by you in the repository's secret diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml index 10d0cdb437..a246352abf 100644 --- a/.github/workflows/gh-pages.yml +++ b/.github/workflows/gh-pages.yml @@ -8,23 +8,23 @@ on: jobs: deploy: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - name: Git checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: true # Fetch Hugo themes (true OR recursive) fetch-depth: 0 # Fetch all history for .GitInfo and .Lastmod ref: main - name: Setup Hugo - uses: peaceiris/actions-hugo@v2 + uses: peaceiris/actions-hugo@v3 with: - hugo-version: 'latest' + hugo-version: '0.128.2' extended: true - name: Cache Hugo modules - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: /tmp/hugo_cache key: ${{ runner.os }}-hugomod-${{ hashFiles('**/go.sum') }} @@ -32,12 +32,12 @@ jobs: ${{ runner.os }}-hugomod- - name: Setup Node - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: - node-version: '14' + node-version: '20' - name: Cache dependencies - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.npm key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }} @@ -51,7 +51,7 @@ jobs: # run: hugo --gc - name: Deploy - uses: peaceiris/actions-gh-pages@v3 + uses: peaceiris/actions-gh-pages@v4 if: github.ref == 'refs/heads/main' with: github_token: ${{ 
secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 26d948602c..ae663ecb0b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,13 @@ +docs/ *.DS_Store* node_modules/ .vscode/* yarn.lock .hugo_build.lock +/.idea/.gitignore +/.idea/altinityknowledgebase.iml +/resources/_gen/assets/scss/scss/main.scss_3f90599f3717b4a4920df16fdcadce3d.content +/resources/_gen/assets/scss/scss/main.scss_3f90599f3717b4a4920df16fdcadce3d.json +/.idea/modules.xml +/.idea/vcs.xml +/.idea/inspectionProfiles/Project_Default.xml diff --git a/README.md b/README.md index 1339a1c617..c857183f29 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ Welcome to the Altinity Knowledgebase Repository! This Knowledgebase was established for Altinity Engineers and ClickHouse community members to work together to find common solutions. -Submissions and merges to this repository are distrubuted at https://kb.altinity.com . +Submissions and merges to this repository are distributed at https://kb.altinity.com . This knowledgebase is licensed under Apache 2.0. Contributors who submit to the Altinity Knowledgebase agree to the Altinity Contribution License Agreement. diff --git a/assets/icons/logo.svg b/assets/icons/logo.svg index 063493b0af..192e1a9eb5 100644 --- a/assets/icons/logo.svg +++ b/assets/icons/logo.svg @@ -1 +1,46 @@ -logo_1 \ No newline at end of file + + + + + + + + + + + + + + + + diff --git a/assets/scss/_content.scss b/assets/scss/_content.scss index 7fea32581b..221ddb4e95 100755 --- a/assets/scss/_content.scss +++ b/assets/scss/_content.scss @@ -5,13 +5,174 @@ Styles to override the theme. 
*/ .td-navbar { - background: #333; + max-width: 1280px; + background: #132f48; } + .td-navbar .navbar-brand svg { margin: -8px 10px 0; } +li.nav-item { + margin-bottom: 0; +} + +.navbar-dark .navbar-nav .nav-link, +.navbar-dark .navbar-nav .nav-link:hover, +.navbar-dark .navbar-nav .nav-link:focus { + color: #fff; + line-height: 65px; + margin-bottom: 0; + padding: 0 10px; +} + +.dropdown-toggle::after { + position: relative; + /* top: 3pt; Uncomment this to lower the icons as requested in comments*/ + content: ""; + display: inline-block; + /* By using an em scale, the arrows will size with the font */ + width: 0.5em; + height: 0.5em; + border-top: 0 none; + border-left: 0 none; + border-right: 2px solid #fff; + border-bottom: 2px solid #fff; + transform: rotate(45deg); + margin-left: 0.5rem +} + +.dropdown-menu { + position: absolute; + top: 100%; + left: 0; + z-index: 1000; + display: none; + float: left; + min-width: 16em; + padding: 0 0 1rem; + margin: 0; + font-size: 1rem; + color: #fff; + text-align: left; + list-style: none; + background-color: rgba(19, 47, 72, 0.9); + background-clip: padding-box; + border: 0px solid transparent; + border-radius: 0; + box-shadow: none; + backdrop-filter: blur(4px); +} + +.dropdown-item, +.dropdown-item:hover, +.dropdown-item:focus { + display: block; + width: 100%; + padding: 0.5rem 1.5rem; + clear: both; + font-weight: 400; + color: #fff; + text-align: inherit; + white-space: nowrap; + background-color: transparent; + border: 0; +} +.dropdown-item:hover, +.dropdown-item:focus { + text-decoration:underline; + background: transparent; +} + +.dropdown-menu[data-bs-popper]{ + margin-top: 0; +} + +@media (max-width: 992px) { + .navbar-nav .nav-item{ + display:none; + } +} + +.header-social-wrap a { + text-decoration: none; +} + +@media (min-width: 992px) { + body>header { + position: fixed; + top: 0; + width: 100%; + background: rgba(19, 47, 72, 0.9); + z-index:1000; + min-height: 65px; + backdrop-filter: blur(4px); + } + 
.td-navbar { + position:relative; + margin: 0 auto; + padding-left: 5px; + padding-right: 5px; + background:transparent; + min-height: 65px; + padding-top:0; + padding-bottom:0; + } + .td-navbar-nav-scroll{ + width: 100%; + } + + .td-navbar .navbar-brand svg { + width: 30px; + margin: -8px 10px 0 0; + height: auto; + } + .td-navbar .navbar-brand span.font-weight-bold { + display:inline-block; + vertical-align: 1px; + font-size:18px; + } + + .td-sidebar { + padding-top: 75px; + background-color: #e9ecf0; + padding-right: 1rem; + border-right: 1px solid #dee2e6; + } + + .td-sidebar-toc { + border-left: 1px solid $border-color; + + @supports (position: sticky) { + position: sticky; + top: 75px; + height: calc(100vh - 85px); + overflow-y: auto; + } + order: 2; + padding-top: 5px; + padding-bottom: 1.5rem; + vertical-align: top; + } + + + .header-social-wrap { + height: 65px; + display: flex; + margin-left: auto; + align-items: center; + margin-bottom: 0; + } +} + + + + +footer.bg-dark { + background: #132f48 !important; +} + img { max-width:100%; height: auto; @@ -70,11 +231,11 @@ th { tr:nth-child(odd) { background: $td-sidebar-bg-color; -} +} tr:nth-child(even) { background-color: rgba(233, 236, 240, 0.5); -} +} .feedback--title { @@ -85,3 +246,92 @@ tr:nth-child(even) { .feedback--answer { width: 4em; } + + +// LEFT SIDEBAR + +@media (min-width: 768px){ + .td-sidebar-nav { + min-height: 100%; + }} + +#m-upgrade_ebook, +#m-join_slack, +#m-maintenance_ebook, +#m-clickhouse_training { + font-weight: bold; + color: #189DD0; + padding-left: 20px !important; + font-size: 15px; +} + +#m-upgrade_ebook:hover span, +#m-join_slack:hover span, +#m-maintenance_ebook:hover span, +#m-clickhouse_training:hover span { + text-decoration: underline; +} + +#m-clickhouse_training { + background:url('data:image/svg+xml,') left 3px no-repeat transparent; + background-size: 17px; +} + +#m-contact_us { + background:url('data:image/svg+xml,') left 3px no-repeat transparent; + 
background-size: 17px; +} + +#m-join_slack { + background:url('data:image/svg+xml,') left 3px no-repeat transparent; + background-size: 17px; +} + +#m-maintenance_ebook { + background:url('data:image/svg+xml,') left 3px no-repeat transparent; + background-size: 17px; +} + +#m-upgrade_ebook { + background:url('data:image/svg+xml,') left 3px no-repeat transparent; + background-size: 17px; +} + +#m-join_slack-li, +#m-upgrade_ebook-li { + padding-top:20px; + border-top: 1px #189DD0 solid; + margin-top:20px; +} + + + +footer { + min-height: auto !important; + color: #fff; +} +footer a, footer a:hover, footer a:active { + color: #fff; +} + +footer .nav li { + font-size: 14px; + line-height: 1.8; +} + +// Twitter icon fix + +footer i.fab.fa-twitter:before{ + content: ' '; + width: 24px; + height: 24px; + display:inline-block; + background: url('data:image/svg+xml,') center bottom no-repeat transparent; + background-size: contain; + vertical-align: -3px; +} + +footer .footer-inner { + max-width: 1280px; + margin: 0 auto; +} diff --git a/config.toml b/config.toml index 8b05c360ee..381e637b5a 100644 --- a/config.toml +++ b/config.toml @@ -1,6 +1,6 @@ baseURL = "http://kb.altinity.com/" languageCode = "en-us" -title = "Altinity Knowledge Base" +title = "Altinity® Knowledge Base for ClickHouse®" # theme = ["docsy"] publishDir = "docs" enableRobotsTXT = true @@ -54,8 +54,8 @@ anchor = "smart" [languages] [languages.en] -title = "Altinity Knowledge Base" -description = "Altinity Knowledge Base" +title = "Altinity® Knowledge Base for ClickHouse®" +description = "Altinity® Knowledge Base for ClickHouse®" languageName = "English" # Weight used for sorting. weight = 1 @@ -76,7 +76,7 @@ time_format_blog = "2006.01.02" [params] # copyright = " Altinity Inc." -copyright = " Altinity Inc. Altinity® and Altinity.Cloud® are registered trademarks of Altinity, Inc. ClickHouse® is a registered trademark of ClickHouse, Inc." +copyright = " Altinity Inc. 
Altinity®, Altinity.Cloud®, and Altinity Stable® are registered trademarks of Altinity, Inc. ClickHouse® is a registered trademark of ClickHouse, Inc.; Altinity is not affiliated with or associated with ClickHouse, Inc. Kafka, Kubernetes, MySQL, and PostgreSQL are trademarks and property of their respective owners." privacy_policy = "https://altinity.com/privacy-policy/" favicon = "/favicon.ico" @@ -158,28 +158,37 @@ enable = false [params.links] # End user relevant links. These will show up on left side of footer and in the community page if you have one. -[[params.links.user]] - name ="Twitter" +[[params.links.developer]] + name ="Slack" + url = "https://altinity.com/slack" + icon = "fab fa-slack" + desc = "Join our Slack Community" +[[params.links.developer]] + name ="X" url = "https://twitter.com/AltinityDB" icon = "fab fa-twitter" - desc = "Follow us on Twitter to get the latest news!" -[[params.links.user]] + desc = "Follow us on X to get the latest news!" +[[params.links.developer]] + name = "LinkedIn" + url = "https://www.linkedin.com/company/altinity/" + icon = "fab fa-linkedin" + desc = "Partner with us on LinkedIn." +[[params.links.developer]] name = "Youtube" url = "https://www.youtube.com/channel/UCE3Y2lDKl_ZfjaCrh62onYA" icon = "fab fa-youtube" desc = "Watch our videos." -[[params.links.user]] - name = "LinkedIn" - url = "https://www.linkedin.com/company/altinity/" - icon = "fab fa-linkedin" - desc = "Partner with us on LinkedIn." # Developer relevant links. These will show up on right side of footer and in the community page if you have one. [[params.links.developer]] name = "GitHub" - url = "https://github.com/orgs/Altinity/" + url = "https://github.com/Altinity/altinityknowledgebase" icon = "fab fa-github" - desc = "Development takes place here!" - + desc = "Development takes place here!" 
+[[params.links.developer]] + name = "Reddit" + url = "https://www.reddit.com/r/Clickhouse/" + icon = "fab fa-reddit" + desc = "Altinity on Reddit" [outputFormats] [outputFormats.PRINT] baseName = "index" @@ -199,4 +208,4 @@ section = [ "HTML", "print"] [[module.imports]] path = "github.com/google/docsy" [[module.imports]] - path = "github.com/google/docsy/dependencies" \ No newline at end of file + path = "github.com/google/docsy/dependencies" diff --git a/content/en/_index.md b/content/en/_index.md index 92ba47dae1..43b10260ff 100755 --- a/content/en/_index.md +++ b/content/en/_index.md @@ -1,7 +1,7 @@ --- -title: "Altinity Knowledge Base" -linkTitle: "Altinity Knowledge Base" -description: "Up-to-date ClickHouse knowledge base for every ClickHouse user." +title: "Altinity® Knowledge Base for ClickHouse®" +linkTitle: "Altinity® Knowledge Base for ClickHouse®" +description: "Up-to-date ClickHouse® knowledge base for every ClickHouse user." keywords: - ClickHouse Knowledge Base - Altinity Knowledge Base @@ -12,7 +12,7 @@ cascade: _target: path: "/**" --- -## Welcome to the Altinity ClickHouse Knowledge Base (KB) +## Welcome to the Altinity® Knowledge Base (KB) for ClickHouse® This knowledge base is supported by [Altinity](http://altinity.com/) engineers to provide quick answers to common questions and issues involving ClickHouse. @@ -21,11 +21,17 @@ The [Altinity Knowledge Base is licensed under Apache 2.0](https://github.com/Al For more detailed information about Altinity services support, see the following: * [Altinity](https://altinity.com/): Providers of Altinity.Cloud, providing SOC-2 certified support for ClickHouse. -* [Altinity ClickHouse Documentation](https://docs.altinity.com): Detailed guides on installing and connecting ClickHouse to other services. -* [Altinity Resources](https://altinity.com/resources/): News, blog posts, and webinars about ClickHouse and Altinity services. 
+* [Altinity.com Documentation](https://docs.altinity.com): Detailed guides on working with: + * [Altinity.Cloud](https://docs.altinity.com/altinitycloud/) + * [Altinity.Cloud Anywhere](https://docs.altinity.com/altinitycloudanywhere/) + * [The Altinity Cloud Manager](https://docs.altinity.com/altinitycloud/quickstartguide/clusterviewexplore/) + * [The Altinity Kubernetes Operator for ClickHouse](https://docs.altinity.com/releasenotes/altinity-kubernetes-operator-release-notes/) + * [The Altinity Sink Connector for ClickHouse](https://docs.altinity.com/releasenotes/altinity-sink-connector-release-notes/) and + * [Altinity Backup for ClickHouse](https://docs.altinity.com/releasenotes/altinity-backup-release-notes/) +* [Altinity Blog](https://altinity.com/blog/): Blog posts about ClickHouse the database and Altinity services. The following sites are also useful references regarding ClickHouse: -* [ClickHouse.tech documentation](https://clickhouse.tech/docs/en/): From Yandex, the creators of ClickHouse +* [ClickHouse.com documentation](https://clickhouse.com/docs/en/): Official documentation from ClickHouse Inc. * [ClickHouse at Stackoverflow](https://stackoverflow.com/questions/tagged/clickhouse): Community driven responses to questions regarding ClickHouse * [Google groups (Usenet) yes we remember it](https://groups.google.com/g/clickhouse): The grandparent of all modern discussion boards. diff --git a/content/en/altinity-kb-dictionaries/_index.md b/content/en/altinity-kb-dictionaries/_index.md index 15cc00ca4e..64bcc3214e 100644 --- a/content/en/altinity-kb-dictionaries/_index.md +++ b/content/en/altinity-kb-dictionaries/_index.md @@ -6,11 +6,11 @@ keywords: - clickhouse arrays - postgresql dictionary description: > - All you need to know about creating and using ClickHouse dictionaries. + All you need to know about creating and using ClickHouse® dictionaries. 
weight: 11 --- -For more information on ClickHouse Dictionaries, see +For more information on ClickHouse® Dictionaries, see the presentation [https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup34/clickhouse_integration.pdf](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup34/clickhouse_integration.pdf), slides 82-95, video https://youtu.be/728Yywcd5ys?t=10642 @@ -20,7 +20,3 @@ https://altinity.com/blog/2020/5/19/clickhouse-dictionaries-reloaded And some videos: https://www.youtube.com/watch?v=FsVrFbcyb84 - -Also there 3rd party articles on the same subj. -https://prog.world/how-to-create-and-use-dictionaries-in-clickhouse/ - diff --git a/content/en/altinity-kb-dictionaries/altinity-kb-sparse_hashed-vs-hashed.md b/content/en/altinity-kb-dictionaries/altinity-kb-sparse_hashed-vs-hashed.md index bf27340627..5a2f5ce070 100644 --- a/content/en/altinity-kb-dictionaries/altinity-kb-sparse_hashed-vs-hashed.md +++ b/content/en/altinity-kb-dictionaries/altinity-kb-sparse_hashed-vs-hashed.md @@ -1,10 +1,10 @@ --- -title: "SPARSE_HASHED VS HASHED" -linkTitle: "SPARSE_HASHED VS HASHED" +title: "SPARSE_HASHED VS HASHED vs HASHED_ARRAY" +linkTitle: "SPARSE_HASHED VS HASHED vs HASHED_ARRAY" description: > - SPARSE_HASHED VS HASHED + SPARSE_HASHED VS HASHED VS HASHED_ARRAY --- -Sparse_hashed layout is supposed to save memory but has some downsides. We can test how much slower SPARSE_HASHED than HASHED is with the following: +Sparse_hashed and hashed_array layouts are supposed to save memory but have some downsides.
We can test it with the following: ```sql create table orders(id UInt64, price Float64) @@ -22,6 +22,11 @@ PRIMARY KEY id SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 TABLE orders DB 'default' USER 'default')) LIFETIME(MIN 0 MAX 0) LAYOUT(SPARSE_HASHED()); +CREATE DICTIONARY orders_hashed_array (id UInt64, price Float64) +PRIMARY KEY id SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 +TABLE orders DB 'default' USER 'default')) +LIFETIME(MIN 0 MAX 0) LAYOUT(HASHED_ARRAY()); + SELECT name, type, @@ -30,26 +35,32 @@ SELECT formatReadableSize(bytes_allocated) AS RAM FROM system.dictionaries WHERE name LIKE 'orders%' -┌─name──────────┬─type─────────┬─status─┬─element_count─┬─RAM────────┐ -│ orders_sparse │ SparseHashed │ LOADED │ 5000000 │ 84.29 MiB │ -│ orders_hashed │ Hashed │ LOADED │ 5000000 │ 256.00 MiB │ -└───────────────┴──────────────┴────────┴───────────────┴────────────┘ +┌─name────────────────┬─type─────────┬─status─┬─element_count─┬─RAM────────┐ +│ orders_hashed_array │ HashedArray │ LOADED │ 5000000 │ 68.77 MiB │ +│ orders_sparse │ SparseHashed │ LOADED │ 5000000 │ 76.30 MiB │ +│ orders_hashed │ Hashed │ LOADED │ 5000000 │ 256.00 MiB │ +└─────────────────────┴──────────────┴────────┴───────────────┴────────────┘ SELECT sum(dictGet('default.orders_hashed', 'price', toUInt64(number))) AS res FROM numbers(10000000) ┌─res─┐ │ 0 │ └─────┘ -1 rows in set. Elapsed: 0.279 sec. Processed 10.02 million rows ... +1 rows in set. Elapsed: 0.546 sec. Processed 10.01 million rows ... SELECT sum(dictGet('default.orders_sparse', 'price', toUInt64(number))) AS res FROM numbers(10000000) ┌─res─┐ │ 0 │ └─────┘ -1 rows in set. Elapsed: 1.085 sec. Processed 10.02 million rows ... -``` +1 rows in set. Elapsed: 1.422 sec. Processed 10.01 million rows ... -As you can see **SPARSE_HASHED** is memory efficient and use about 3 times less memory (!!!) but is almost 4 times slower. 
But this is the ultimate case because this test does not read data from the disk (no MergeTree table involved). +SELECT sum(dictGet('default.orders_hashed_array', 'price', toUInt64(number))) AS res +FROM numbers(10000000) +┌─res─┐ +│ 0 │ +└─────┘ +1 rows in set. Elapsed: 0.558 sec. Processed 10.01 million rows ... +``` -We encourage you to test **SPARSE_HASHED** against your real queries, because it able to save a lot of memory and have larger (in rows) external dictionaries. +As you can see **SPARSE_HASHED** is memory efficient and uses about 3 times less memory (!!!) but is almost 3 times slower as well. On the other hand **HASHED_ARRAY** is even more efficient in terms of memory usage and maintains almost the same performance as **HASHED** layout. diff --git a/content/en/altinity-kb-dictionaries/dictionaries-and-arrays.md b/content/en/altinity-kb-dictionaries/dictionaries-and-arrays.md index db0352e2f2..8cfa4b0027 100644 --- a/content/en/altinity-kb-dictionaries/dictionaries-and-arrays.md +++ b/content/en/altinity-kb-dictionaries/dictionaries-and-arrays.md @@ -4,7 +4,7 @@ linkTitle: "Dictionaries & arrays" description: > Dictionaries & arrays --- -## Dictionary with Clickhouse table as a source +## Dictionary with ClickHouse® table as a source ### Test data diff --git a/content/en/altinity-kb-dictionaries/dictionary-on-top-tables.md b/content/en/altinity-kb-dictionaries/dictionary-on-top-tables.md index 8dc6c35ea4..a7fa9c0cf5 100644 --- a/content/en/altinity-kb-dictionaries/dictionary-on-top-tables.md +++ b/content/en/altinity-kb-dictionaries/dictionary-on-top-tables.md @@ -1,8 +1,8 @@ --- -title: "Dictionary on the top of the several tables using VIEW" -linkTitle: "Dictionary on the top of the several tables using VIEW" +title: "Dictionary on the top of several tables using VIEW" +linkTitle: "Dictionary on the top of several tables using VIEW" description: > - Dictionary on the top of the several tables using VIEW + Dictionary on the top of several tables using
VIEW --- ```sql diff --git a/content/en/altinity-kb-dictionaries/dimension_table_desing.md b/content/en/altinity-kb-dictionaries/dimension_table_desing.md new file mode 100644 index 0000000000..21c5f4bcb8 --- /dev/null +++ b/content/en/altinity-kb-dictionaries/dimension_table_desing.md @@ -0,0 +1,161 @@ +--- +title: "Dimension table design " +linkTitle: "Dimension table design " +description: > + Dimension table design +--- +## Dimension table design considerations + +### Choosing storage Engine + +To optimize the performance of reporting queries, dimensional tables should be loaded into RAM as ClickHouse Dictionaries whenever feasible. It's becoming increasingly common to allocate 100-200GB of RAM per server specifically for these Dictionaries. Implementing sharding by tenant can further reduce the size of these dimension tables, enabling a greater portion of them to be stored in RAM and thus enhancing query speed. + +Different Dictionary Layouts can take more or less RAM (in trade for speed). + +- The cached dictionary layout is ideal for minimizing the amount of RAM required to store dimensional data when the hit ratio is high. This layout allows frequently accessed data to be kept in RAM while less frequently accessed data is stored on disk, thereby optimizing memory usage without sacrificing performance. +- HASHED_ARRAY or SPARSE_HASHED dictionary layouts take less RAM than HASHED. See tests [here](https://kb.altinity.com/altinity-kb-dictionaries/altinity-kb-sparse_hashed-vs-hashed/). +- Normalization techniques can be used to lower RAM usage (see below) + +If the amount of data is so high that it does not fit in the RAM even after suitable sharding, a disk-based table with an appropriate engine and its parameters can be used for accessing dimensional data in report queries. 
+ +MergeTree engines (including Replacing or Aggregating) are not tuned by default for point queries due to the high index granularity (8192) and the necessity of using FINAL (or GROUP BY) when accessing mutated data. + +When using the MergeTree engine for Dimensions, the table’s index granularity should be lowered to 256. More RAM will be used for PK, but it’s a reasonable price for reading less data from the disk and making report queries faster, and that amount can be lowered by lightweight PK design (see below). + +The `EmbeddedRocksDB` engine could be used as an alternative. It performs much better than ReplacingMergeTree for highly mutated data, as it is tuned by design for random point queries and high-frequency updates. However, EmbeddedRocksDB does not support Replication, so INSERTing data to such tables should be done over a Distributed table with `internal_replication` set to false, which is vulnerable to different desync problems. Some “sync” procedures should be designed, developed, and applied after serious data ingesting incidents (like ETL crashes). + +When the Dimension table is built on several incoming event streams, `AggregatingMergeTree` is preferable to `ReplacingMergeTree`, as it allows putting data from different event streams without external ETL processes: + +```sql +CREATE TABLE table_C ( + id UInt64, + colA SimpleAggregatingFunction(any,Nullable(UInt32)), + colB SimpleAggregatingFunction(max, String) +) ENGINE = AggregatingMergeTree() +PARTITION BY intDiv(id, 0x800000000000000) /* 32 bucket*/ +ORDER BY id; + +CREATE MATERIALIZED VIEW mv_A TO table_C AS SELECT id,colA FROM Kafka_A; +CREATE MATERIALIZED VIEW mv_B TO table_C AS SELECT id,colB FROM Kafka_B; +``` + +EmbeddedRocksDB natively supports UPDATEs without any complications with AggregatingFunctions. 
+ +For dimensions where some “start date” column is used in filtering, the [Range_Hashed](https://kb.altinity.com/altinity-kb-dictionaries/altinity-kb-range_hashed-example-open-intervals/) dictionary layout can be used if it is acceptable for RAM usage. For MergeTree variants, ASOF JOIN in queries is needed. Such types of dimensions are the first candidates for placement into RAM. + +EmbeddedRocksDB is not suitable here. + +### Primary Key + +To increase query performance, I recommend using a single UInt64 (not String) column for PK, where the upper 32 bits are reserved for tenant_id (shop_id) and the lower 32 bits for actual object_id (like customer_id, product_id, etc.) + +That benefits both EmbeddedRocksDB Engine (it can have only one Primary Key column) and ReplacingMergeTree, as FINAL processing will work much faster with a light ORDER BY column of a single UInt64 value. + +### Direct Dictionary and UDFs + +To make the SQL code of report queries more readable and manageable, I recommend always using Dictionaries to access dimensions. A `direct dictionary layout` should be used for disk-stored dimensions (EmbeddedRocksDB or *MergeTree). + +When Clickhouse builds a query to Direct Dictionary, it automatically creates a filter with a list of all needed ID values. There is no need to write code to filter necessary dimension rows to reduce the hash table for the right join table. + +Another trick for code manageability is creating an interface function for every dimension to place here all the complexity of managing IDs by packing several values into a single PK value: + +```sql +create or replace function getCustomer as (shop, id, attr) -> + dictGetOrNull('dict_Customers', attr, bitOr((bitShiftLeft(toUInt64(shop),32)),id)); +``` + +It also allows the flexibility of changing dictionary names when testing different types of Engines or can be used to spread dimensional data to several dictionaries. F.e. 
most active tenants can be served by expensive in-RAM dictionary, while others (not active) tenants will be served from disk. + +```sql +create or replace function getCustomer as (shop, id, attr) -> + dictGetOrDefault('dict_Customers_RAM', attr, bitOr((bitShiftLeft(toUInt64(shop),32)),id) as key, + dictGetOrNull('dict_Customers_MT', attr, key)); +``` + +We always recommended DENORMALIZATION for Fact tables. However, NORMALIZATION is still a usable approach for taking less RAM for Dimension data stored as dictionaries. + +Example of storing a long company name (String) in a separate dictionary: + +```sql +create or replace function getCustomer as (shop, id, attr) -> + if(attr='company_name', + dictGetOrDefault('dict_Company_name', 'name', + dictGetOrNull('dict_Customers', 'company_id', + bitOr((bitShiftLeft(toUInt64(shop),32)),id)) as key), + dictGetOrNull('dict_Customers', attr, key) + ); +``` + +Example of combining Hash and Direct Dictionaries. Allows to increase lifetime without losing consistency. 
+ +```sql +CREATE OR REPLACE FUNCTION getProduct AS (product_id, attr) -> + dictGetOrDefault('hashed_dictionary', attr,(shop_id, product_id), + dictGet('direct_dictionary',attr,(shop_id, product_id) ) + ); +``` + +### Tests/Examples + +EmbeddedRocksDB + +```sql +CREATE TABLE Dim_Customers ( + id UInt64, + name String, + new_or_returning bool +) ENGINE = EmbeddedRocksDB() +PRIMARY KEY (id); + +INSERT INTO Dim_Customers +SELECT bitShiftLeft(3648061509::UInt64,32)+number, + ['Customer A', 'Customer B', 'Customer C', 'Customer D', 'Customer E'][number % 5 + 1], + number % 2 = 0 +FROM numbers(100); + +CREATE DICTIONARY dict_Customers +( + id UInt64, + name String, + new_or_returning bool +) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(TABLE 'Dim_Customers')); + +select dictGetOrNull('dict_Customers', 'name', + bitOr((bitShiftLeft(toUInt64(shop_id),32)),customer_id)); +``` + +ReplacingMergeTree + +```sql +CREATE TABLE Dim_Customers ( + id UInt64, + name String, + new_or_returning bool +) ENGINE = ReplacingMergeTree() +ORDER BY id +PARTITION BY intDiv(id, 0x800000000000000) /* 32 buckets by shop_id */ +settings index_granularity=256; + +CREATE DICTIONARY dict_Customers +( + id UInt64, + name String, + new_or_returning bool +) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(query 'select * from Dim_Customers FINAL')); + +set do_not_merge_across_partitions_select_final=1; -- or place it to profile +select dictGet('dict_Customers','name',bitShiftLeft(3648061509::UInt64,32)+1); +``` + +Tests 1M random reads over 10M entries per shop_id in the Dimension table + +- [EmbeddedRocksDB](https://fiddle.clickhouse.com/c304d0cc-f1c2-4323-bd65-ab82165aecb6) - 0.003s +- [ReplacingMergeTree](https://fiddle.clickhouse.com/093fc133-0685-4c97-aa90-d38200f93f9f)- 0.003s + +There is no difference in SELECT on that synthetic test with all MergeTree optimizations applied. The test must be rerun on actual data with the expected update volume. 
The difference could be seen on a table with high-volume real-time updates. diff --git a/content/en/altinity-kb-dictionaries/mysql8-source-for-dictionaries.md b/content/en/altinity-kb-dictionaries/mysql8-source-for-dictionaries.md index 3554bcda00..650519dff6 100644 --- a/content/en/altinity-kb-dictionaries/mysql8-source-for-dictionaries.md +++ b/content/en/altinity-kb-dictionaries/mysql8-source-for-dictionaries.md @@ -6,7 +6,7 @@ description: > --- #### Authorization -MySQL8 used default authorization plugin `caching_sha2_password`. Unfortunately, `libmysql` which currently used (21.4-) in clickhouse is not. +MySQL8 used default authorization plugin `caching_sha2_password`. Unfortunately, `libmysql` which is currently used (21.4-) in ClickHouse® is not. You can fix it during create custom user with `mysql_native_password` authentication plugin. diff --git a/content/en/altinity-kb-dictionaries/partial-updates.md b/content/en/altinity-kb-dictionaries/partial-updates.md index 088e562e84..8efd312c3a 100644 --- a/content/en/altinity-kb-dictionaries/partial-updates.md +++ b/content/en/altinity-kb-dictionaries/partial-updates.md @@ -4,7 +4,7 @@ linkTitle: "Partial updates" description: > Partial updates --- -Clickhouse is able to fetch from a source only updated rows. You need to define `update_field` section. +ClickHouse® is able to fetch from a source only updated rows. You need to define `update_field` section. As an example, We have a table in an external source MySQL, PG, HTTP, ... defined with the following code sample: @@ -36,4 +36,4 @@ LIFETIME(MIN 30 MAX 30) A dictionary with **update_field** `updated_at` will fetch only updated rows. A dictionary saves the current time (now) time of the last successful update and queries the source `where updated_at >= previous_update - 1` (shift = 1 sec.).
-In case of HTTP source Clickhouse will send get requests with **update_field** as an URL parameter `&updated_at=2020-01-01%2000:01:01` +In case of HTTP source ClickHouse will send get requests with **update_field** as an URL parameter `&updated_at=2020-01-01%2000:01:01` diff --git a/content/en/altinity-kb-dictionaries/security-named-collections.md b/content/en/altinity-kb-dictionaries/security-named-collections.md new file mode 100644 index 0000000000..5c34dd11d8 --- /dev/null +++ b/content/en/altinity-kb-dictionaries/security-named-collections.md @@ -0,0 +1,47 @@ +--- +title: "Security named collections" +linkTitle: "Security named collections" +description: > + Security named collections +--- + + +## Dictionary with ClickHouse® table as a source with named collections + +### Data for connecting to external sources can be stored in named collections + +```xml + + + + localhost + 9000 + default + ch_dict + mypass + + + +``` + +### Dictionary + +```sql +DROP DICTIONARY IF EXISTS named_coll_dict; +CREATE DICTIONARY named_coll_dict +( + key UInt64, + val String +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(NAME local_host TABLE my_table DB default)) +LIFETIME(MIN 1 MAX 2) +LAYOUT(HASHED()); + +INSERT INTO my_table(key, val) VALUES(1, 'first row'); + +SELECT dictGet('named_coll_dict', 'b', 1); +┌─dictGet('named_coll_dict', 'b', 1)─┐ +│ first row │ +└────────────────────────────────────┘ +``` diff --git a/content/en/altinity-kb-functions/array-like-memory-usage.md b/content/en/altinity-kb-functions/array-like-memory-usage.md index a88ceec5a1..69dbedf0f5 100644 --- a/content/en/altinity-kb-functions/array-like-memory-usage.md +++ b/content/en/altinity-kb-functions/array-like-memory-usage.md @@ -2,12 +2,12 @@ title: "arrayMap, arrayJoin or ARRAY JOIN memory usage" linkTitle: "arrayMap, arrayJoin or ARRAY JOIN memory usage" description: > - Why arrayMap, arrayFilter, arrayJoin use so much memory? + Why do arrayMap, arrayFilter, and arrayJoin use so much memory? 
--- ## arrayMap-like functions memory usage calculation. -In order to calculate arrayMap or similar array* functions ClickHouse temporarily does arrayJoin-like operation, which in certain conditions can lead to huge memory usage for big arrays. +In order to calculate arrayMap or similar array* functions ClickHouse® temporarily does arrayJoin-like operation, which in certain conditions can lead to huge memory usage for big arrays. So for example, you have 2 columns: diff --git a/content/en/altinity-kb-functions/arrayfold.md b/content/en/altinity-kb-functions/arrayfold.md new file mode 100644 index 0000000000..3a66ce055d --- /dev/null +++ b/content/en/altinity-kb-functions/arrayfold.md @@ -0,0 +1,17 @@ +--- +title: "arrayFold" +linkTitle: "arrayFold" +--- + +## EWMA example + +```sql +WITH + [40, 45, 43, 31, 20] AS data, + 0.3 AS alpha +SELECT arrayFold((acc, x) -> arrayPushBack(acc, (alpha * x) + ((1 - alpha) * (acc[-1]))), arrayPopFront(data), [CAST(data[1], 'Float64')]) as ewma + +┌─ewma─────────────────────────────────────────────────────────────┐ +│ [40,41.5,41.949999999999996,38.66499999999999,33.06549999999999] │ +└──────────────────────────────────────────────────────────────────┘ +``` diff --git a/content/en/altinity-kb-functions/assumenotnull-and-friends.md b/content/en/altinity-kb-functions/assumenotnull-and-friends.md index 7ba6ca1ee5..760d2afbbf 100644 --- a/content/en/altinity-kb-functions/assumenotnull-and-friends.md +++ b/content/en/altinity-kb-functions/assumenotnull-and-friends.md @@ -89,7 +89,7 @@ Code: 36, e.displayText() = DB::Exception: Unexpected value 0 in enum, Stack tra ``` {{% alert title="Info" color="info" %}} -Null values in ClickHouse are stored in a separate dictionary: is this value Null. And for faster dispatch of functions there is no check on Null value while function execution, so functions like plus can modify internal column value (which has default value). 
In normal conditions it’s not a problem because on read attempt, ClickHouse first would check the Null dictionary and return value from column itself for non-Nulls only. And `assumeNotNull` function just ignores this Null dictionary. So it would return only column values, and in certain cases it’s possible to have unexpected results. +Null values in ClickHouse® are stored in a separate dictionary: is this value Null. And for faster dispatch of functions there is no check on Null value while function execution, so functions like plus can modify internal column value (which has default value). In normal conditions it’s not a problem because on read attempt, ClickHouse first would check the Null dictionary and return value from column itself for non-Nulls only. And `assumeNotNull` function just ignores this Null dictionary. So it would return only column values, and in certain cases it’s possible to have unexpected results. {{% /alert %}} If it's possible to have Null values, it's better to use `ifNull` function instead. diff --git a/content/en/altinity-kb-functions/how-to-encode-decode-quantiletdigest-state.md b/content/en/altinity-kb-functions/how-to-encode-decode-quantiletdigest-state.md new file mode 100644 index 0000000000..766d864fec --- /dev/null +++ b/content/en/altinity-kb-functions/how-to-encode-decode-quantiletdigest-state.md @@ -0,0 +1,85 @@ +--- +title: "How to encode/decode quantileTDigest states from/to list of centroids" +linkTitle: "Encoding and Decoding of quantileTDigest states" +weight: 100 +description: >- + A way to export or import quantileTDigest states from/into ClickHouse® +--- + +## quantileTDigestState + +quantileTDigestState is stored in two parts: a count of centroids in LEB128 format + list of centroids without a delimiter. Each centroid is represented as two Float32 values: Mean & Count. 
+ +```sql +SELECT + hex(quantileTDigestState(1)), + hex(toFloat32(1)) + +┌─hex(quantileTDigestState(1))─┬─hex(toFloat32(1))─┐ +│ 010000803F0000803F │ 0000803F │ +└──────────────────────────────┴───────────────────┘ + 01 0000803F 0000803F + ^ ^ ^ + LEB128 Float32 Mean Float32 Count +``` + +We need to make two helper `UDF` functions: + +```xml +cat /etc/clickhouse-server/decodeTDigestState_function.xml + + + executable + 0 + decodeTDigestState + Array(Tuple(mean Float32, count Float32)) + + AggregateFunction(quantileTDigest, UInt32) + + RowBinary + cat + 0 + + + +cat /etc/clickhouse-server/encodeTDigestState_function.xml + + + executable + 0 + encodeTDigestState + AggregateFunction(quantileTDigest, UInt32) + + Array(Tuple(mean Float32, count Float32)) + + RowBinary + cat + 0 + + +``` + +Those UDF – `(encode/decode)TDigestState` converts `TDigestState` to the `Array(Tuple(Float32, Float32))` and back. + +```sql +SELECT quantileTDigest(CAST(number, 'UInt32')) AS result +FROM numbers(10) + +┌─result─┐ +│ 4 │ +└────────┘ + +SELECT decodeTDigestState(quantileTDigestState(CAST(number, 'UInt32'))) AS state +FROM numbers(10) + +┌─state─────────────────────────────────────────────────────────┐ +│ [(0,1),(1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1)] │ +└───────────────────────────────────────────────────────────────┘ + +SELECT finalizeAggregation(encodeTDigestState(CAST('[(0,1),(1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1),(8,1),(9,1)]', 'Array(Tuple(Float32, Float32))'))) AS result + +┌─result─┐ +│ 4 │ +└────────┘ +``` + diff --git a/content/en/altinity-kb-functions/kurt_skew_statistics.md b/content/en/altinity-kb-functions/kurt_skew_statistics.md new file mode 100644 index 0000000000..84cd3b4950 --- /dev/null +++ b/content/en/altinity-kb-functions/kurt_skew_statistics.md @@ -0,0 +1,76 @@ +--- +title: "kurt & skew statistical functions in ClickHouse® +" +linkTitle: "kurt & skew" +weight: 100 +description: >- + How to make them return the same result like python scipy 
+--- + +```python +from scipy.stats import skew, kurtosis + +# Creating a dataset + +dataset = [10,17,71,6,55,38,27,61,48,46,21,38,2,67,35,77,29,31,27,67,81,82,75,81,31,38,68,95,37,34,65,59,81,28,82,80,35,3,97,42,66,28,85,98,45,15,41,61,24,53,97,86,5,65,84,18,9,32,46,52,69,44,78,98,61,64,26,11,3,19,0,90,28,72,47,8,0,74,38,63,88,43,81,61,34,24,37,53,79,72,5,77,58,3,61,56,1,3,5,61] + +print(skew(dataset, axis=0, bias=True), skew(dataset)) + +# -0.05785361619432152 -0.05785361619432152 +``` +```sql +WITH arrayJoin([10,17,71,6,55,38,27,61,48,46,21,38,2,67,35,77,29,31,27,67,81,82,75,81,31,38,68,95,37,34,65,59,81,28,82,80,35,3,97,42,66,28,85,98,45,15,41,61,24,53,97,86,5,65,84,18,9,32,46,52,69,44,78,98,61,64,26,11,3,19,0,90,28,72,47,8,0,74,38,63,88,43,81,61,34,24,37,53,79,72,5,77,58,3,61,56,1,3,5,61]) AS value +SELECT skewPop(value) AS ex_1 + +┌──────────────────ex_1─┐ +│ -0.057853616194321014 │ +└───────────────────────┘ +``` +```python +print(skew(dataset, bias=False)) + +# -0.05873838908626328 +``` +```sql +WITH arrayJoin([10, 17, 71, 6, 55, 38, 27, 61, 48, 46, 21, 38, 2, 67, 35, 77, 29, 31, 27, 67, 81, 82, 75, 81, 31, 38, 68, 95, 37, 34, 65, 59, 81, 28, 82, 80, 35, 3, 97, 42, 66, 28, 85, 98, 45, 15, 41, 61, 24, 53, 97, 86, 5, 65, 84, 18, 9, 32, 46, 52, 69, 44, 78, 98, 61, 64, 26, 11, 3, 19, 0, 90, 28, 72, 47, 8, 0, 74, 38, 63, 88, 43, 81, 61, 34, 24, 37, 53, 79, 72, 5, 77, 58, 3, 61, 56, 1, 3, 5, 61]) AS value +SELECT + skewSamp(value) AS ex_1, + (pow(count(), 2) * ex_1) / ((count() - 1) * (count() - 2)) AS G + +┌─────────────────ex_1─┬────────────────────G─┐ +│ -0.05698798509149213 │ -0.05873838908626276 │ +└──────────────────────┴──────────────────────┘ +``` +```python +print(kurtosis(dataset, bias=True, fisher=False), kurtosis(dataset, bias=True, fisher=True), kurtosis(dataset)) + +# 1.9020275610791184 -1.0979724389208816 -1.0979724389208816 +``` +```sql +WITH arrayJoin([10, 17, 71, 6, 55, 38, 27, 61, 48, 46, 21, 38, 2, 67, 35, 77, 29, 31, 27, 67, 81, 82, 75, 81, 
31, 38, 68, 95, 37, 34, 65, 59, 81, 28, 82, 80, 35, 3, 97, 42, 66, 28, 85, 98, 45, 15, 41, 61, 24, 53, 97, 86, 5, 65, 84, 18, 9, 32, 46, 52, 69, 44, 78, 98, 61, 64, 26, 11, 3, 19, 0, 90, 28, 72, 47, 8, 0, 74, 38, 63, 88, 43, 81, 61, 34, 24, 37, 53, 79, 72, 5, 77, 58, 3, 61, 56, 1, 3, 5, 61]) AS value +SELECT + kurtPop(value) AS pearson, + pearson - 3 AS fisher + +┌────────────pearson─┬──────────────fisher─┐ +│ 1.9020275610791124 │ -1.0979724389208876 │ +└────────────────────┴─────────────────────┘ +``` +```python +print(kurtosis(dataset, bias=False)) + +# -1.0924286152713967 +``` +```sql +WITH arrayJoin([10, 17, 71, 6, 55, 38, 27, 61, 48, 46, 21, 38, 2, 67, 35, 77, 29, 31, 27, 67, 81, 82, 75, 81, 31, 38, 68, 95, 37, 34, 65, 59, 81, 28, 82, 80, 35, 3, 97, 42, 66, 28, 85, 98, 45, 15, 41, 61, 24, 53, 97, 86, 5, 65, 84, 18, 9, 32, 46, 52, 69, 44, 78, 98, 61, 64, 26, 11, 3, 19, 0, 90, 28, 72, 47, 8, 0, 74, 38, 63, 88, 43, 81, 61, 34, 24, 37, 53, 79, 72, 5, 77, 58, 3, 61, 56, 1, 3, 5, 61]) AS value +SELECT + kurtSamp(value) AS ex_1, + (((pow(count(), 2) * (count() + 1)) / (((count() - 1) * (count() - 2)) * (count() - 3))) * ex_1) - ((3 * pow(count() - 1, 2)) / ((count() - 2) * (count() - 3))) AS G + +┌──────────────ex_1─┬───────────────────G─┐ +│ 1.864177212613638 │ -1.0924286152714027 │ +└───────────────────┴─────────────────────┘ +``` + + +[Google Collab](https://colab.research.google.com/drive/1xoWNi7QAJ9XZtCbmQqJFB8Z_mCreITPW?usp=sharing) diff --git a/content/en/altinity-kb-integrations/ClickHouse_python_drivers.md b/content/en/altinity-kb-integrations/ClickHouse_python_drivers.md new file mode 100644 index 0000000000..08c8bc89df --- /dev/null +++ b/content/en/altinity-kb-integrations/ClickHouse_python_drivers.md @@ -0,0 +1,172 @@ +--- +title: "ClickHouse® python drivers" +linkTitle: "ClickHouse® python drivers" +weight: 100 +description: >- + Python main drivers/clients for ClickHouse® +--- + +There are two main python drivers that can be used with ClickHouse. 
Each of them has its own set of features and use cases: + +### ClickHouse driver AKA [clickhouse-driver](https://clickhouse-driver.readthedocs.io/en/latest/) + +The **`clickhouse-driver`** is a Python library used for interacting with ClickHouse. Here's a summary of its features: + +1. **Connectivity**: **`clickhouse-driver`** allows Python applications to connect to ClickHouse servers over TCP/IP Native Interface (9000/9440 ports) and also the HTTP interface, but HTTP support is experimental. +2. **SQL Queries**: It enables executing SQL queries against ClickHouse databases from Python scripts, including data manipulation (insertion, deletion, updating) and data retrieval (select queries). +3. **Query Parameters**: Supports parameterized queries, which helps in preventing SQL injection attacks and allows for more efficient execution of repeated queries with different parameter values. +4. **Connection Pooling**: Provides support for connection pooling, which helps manage connections efficiently, especially in high-concurrency applications, by reusing existing connections instead of creating new ones for each query. +5. **Data Types**: Handles conversion between Python data types and ClickHouse data types, ensuring compatibility and consistency when passing data between Python and ClickHouse. +6. **Error Handling**: Offers comprehensive error handling mechanisms, including exceptions and error codes, to facilitate graceful error recovery and handling in Python applications. +7. **Asynchronous Support**: Supports asynchronous execution of queries using `asyncio`, allowing for non-blocking query execution in asynchronous Python applications. +8. **Customization**: Provides options for customizing connection settings, query execution behavior, and other parameters to suit specific application requirements and performance considerations. +9.
**Compatibility**: Works with various versions of ClickHouse, ensuring compatibility and support for different ClickHouse features and functionalities. +10. **Documentation and Community**: Offers comprehensive documentation and active community support, including examples, tutorials, and forums, to assist developers in effectively using the library and addressing any issues or questions they may have. +11. **Supports multiple host** **on connection string** https://clickhouse-driver.readthedocs.io/en/latest/features.html#multiple-hosts +12. **Connection pooling** (aiohttp) + +**Python ecosystem libs/modules:** + +- Good Pandas/Numpy support: [https://clickhouse-driver.readthedocs.io/en/latest/features.html#numpy-pandas-support](https://clickhouse-driver.readthedocs.io/en/latest/features.html#numpy-pandas-support) +- Good SQLALchemy support: [https://pypi.org/project/clickhouse-sqlalchemy/](https://pypi.org/project/clickhouse-sqlalchemy/) + +This was the first python driver for ClickHouse. It has a mature codebase. By default ClickHouse drivers uses [synchronous code](https://clickhouse-driver.readthedocs.io/en/latest/quickstart.html#async-and-multithreading). There is a wrapper to convert code to asynchronous, [https://github.com/long2ice/asynch](https://github.com/long2ice/asynch) + +Here you can get a basic working example from Altinity repo for ingestion/selection using clickhouse-driver: + +[https://github.com/lesandie/clickhouse-tests/blob/main/scripts/test_ch_driver.py](https://github.com/lesandie/clickhouse-tests/blob/main/scripts/test_ch_driver.py) + +### ClickHouse-connect AKA [clickhouse-connect](https://clickhouse.com/docs/en/integrations/python) + +The ClickHouse Connect Python driver is the ClickHouse, Inc supported-official Python library. Here's a summary of its key features: + +1. **Connectivity**: allows Python applications to connect to ClickHouse servers over HTTP Interface (8123/8443 ports). +2. 
**Compatibility**: The driver is compatible with Python 3.x versions, ensuring that it can be used with modern Python applications without compatibility issues. +3. **Performance**: The driver is optimized for performance, allowing for efficient communication with ClickHouse databases to execute queries and retrieve results quickly, which is crucial for applications requiring low latency and high throughput. +4. **Query Execution**: Developers can use the driver to execute SQL queries against ClickHouse databases, including SELECT, INSERT, UPDATE, DELETE, and other SQL operations, enabling them to perform various data manipulation tasks from Python applications. +5. **Parameterized Queries**: The driver supports parameterized queries, allowing developers to safely pass parameters to SQL queries to prevent SQL injection attacks and improve query performance by reusing query execution plans. +6. **Data Type Conversion**: The driver automatically handles data type conversion between Python data types and ClickHouse data types, ensuring seamless integration between Python applications and ClickHouse databases without manual data type conversion. +7. **Error Handling**: The driver provides robust error handling mechanisms, including exceptions and error codes, to help developers handle errors gracefully and take appropriate actions based on the type of error encountered during query execution. +8. **Limited Asynchronous Support**: Some implementations of the driver offer asynchronous support, allowing developers to execute queries asynchronously to improve concurrency and scalability in asynchronous Python applications using asynchronous I/O frameworks like `asyncio`. +9. **Configuration Options**: The driver offers various configuration options, such as connection parameters, authentication methods, and connection pooling settings, allowing developers to customize the driver's behavior to suit their specific requirements and environment. +10. 
**Documentation and Community**: Offers comprehensive documentation and active community support, including examples, tutorials, and forums, to assist developers in effectively using the library and addressing any issues or questions they may have. [https://clickhouse.com/docs/en/integrations/language-clients/python/intro/](https://clickhouse.com/docs/en/integrations/language-clients/python/intro/) +11. **Multiple host on connection string not supported** https://github.com/ClickHouse/clickhouse-connect/issues/74 +12. **Connection pooling** (urllib3) + +**Python ecosystem libs/modules:** + +- Good Pandas/Numpy support: [https://clickhouse.com/docs/en/integrations/python#consuming-query-results-with-numpy-pandas-or-arrow](https://clickhouse.com/docs/en/integrations/python#consuming-query-results-with-numpy-pandas-or-arrow) +- Decent SQLAlchemy 1.3 and 1.4 support (limited feature set) + +It is the most recent driver with the latest feature set (query context and query streaming …. ), and in recent release [asyncio wrapper](https://github.com/ClickHouse/clickhouse-connect/releases/tag/v0.7.16) + +You can check multiple official examples here: + +[https://github.com/ClickHouse/clickhouse-connect/tree/457533df05fa685b2a1424359bea5654240ef971/examples](https://github.com/ClickHouse/clickhouse-connect/tree/457533df05fa685b2a1424359bea5654240ef971/examples) + +Also some Altinity examples from repo: + +[https://github.com/lesandie/clickhouse-tests/blob/main/scripts/test_ch_connect_asyncio_insert.py](https://github.com/lesandie/clickhouse-tests/blob/main/scripts/test_ch_connect_asyncio_insert.py) + +You can clone the repo and use the helper files like `DDL.sql` to setup some tests. 
+ + +### Most common use cases: + +#### Connection pooler: + +- Clickhouse-connect can use a connection pooler (based on urllib3) https://clickhouse.com/docs/en/integrations/python#customizing-the-http-connection-pool +- With Clickhouse-driver you can use **aiohttp** (https://docs.aiohttp.org/en/stable/client_advanced.html#limiting-connection-pool-size) + +#### Managing ClickHouse `session_id`: + +- clickhouse-driver + - Because it is using the Native Interface, `session_id` is managed internally by ClickHouse, so it is very rare (unless using asyncio) to get: + + `Code: 373. DB::Exception: Session is locked by a concurrent client. (SESSION_IS_LOCKED)` . + +- clickhouse-connect: How to use clickhouse-connect in a Pythonic way and avoid getting `SESSION_IS_LOCKED` exceptions: + - [https://clickhouse.com/docs/en/integrations/python#managing-clickhouse-session-ids](https://clickhouse.com/docs/en/integrations/python#managing-clickhouse-session-ids) + - If you want to specify a session_id per query you should be able to use the setting dictionary to pass a `session_id` for each query (note that ClickHouse will automatically generate a `session_id` if none is provided).
+ + ```python + SETTINGS = {"session_id": "dagster-batch" + "-" + f"{time.time()}"} + client.query("INSERT INTO table ....", settings=SETTINGS) + ``` + + +Also in clickhouse documentation some explanation how to set `session_id` with another approach: [https://clickhouse.com/docs/en/integrations/python#managing-clickhouse-session-ids](https://clickhouse.com/docs/en/integrations/python#managing-clickhouse-session-ids) + +[ClickHouse Connect Driver API | ClickHouse Docs](https://clickhouse.com/docs/en/integrations/language-clients/python/driver-api#common-method-arguments) + +[Best practices with flask · Issue #73 · ClickHouse/clickhouse-connect](https://github.com/ClickHouse/clickhouse-connect/issues/73#issuecomment-1325280242) + +#### Asyncio (asynchronous wrappers) + +##### clickhouse-connect + +New release with [asyncio wrapper for clickhouse-connect](https://github.com/ClickHouse/clickhouse-connect/releases/tag/v0.7.16) + +How the wrapper works: https://clickhouse.com/docs/en/integrations/python#asyncclient-wrapper + +Wrapper and connection pooler example: + +```python +import clickhouse_connect +import asyncio +from clickhouse_connect.driver.httputil import get_pool_manager + +async def main(): + client = await clickhouse_connect.get_async_client(host='localhost', port=8123, pool_mgr=get_pool_manager()) + for i in range(100): + result = await client.query("SELECT name FROM system.databases") + print(result.result_rows) + +asyncio.run(main()) +``` + +`clickhouse-connect` code is synchronous by default and running synchronous functions in an async application is a workaround and might not be as efficient as using a library/wrapper designed for asynchronous operations from the ground up.. So you can use the current wrapper or you can use another approach with `asyncio` and `concurrent.futures` and `ThreadpoolExecutor` or `ProcessPoolExecutor`. 
The Python GIL imposes a mutex over threads but not over processes, so if you need performance at the cost of using processes instead of threads (not much different for medium workloads) you can use `ProcessPoolExecutor` instead. + +Some info about this from the tinybird guys https://www.tinybird.co/blog-posts/killing-the-processpoolexecutor + +For clickhouse-connect: + +```python +import asyncio +from concurrent.futures import ProcessPoolExecutor +import clickhouse_connect + +# Function to execute a query using clickhouse-connect synchronously +def execute_query_sync(query): + client = clickhouse_connect.get_client() # Adjust connection params as needed + result = client.query(query) + return result + +# Asynchronous wrapper function to run the synchronous function in a process pool +async def execute_query_async(query): + loop = asyncio.get_running_loop() + # Use ProcessPoolExecutor to execute the synchronous function + with ProcessPoolExecutor() as pool: + result = await loop.run_in_executor(pool, execute_query_sync, query) + return result + +async def main(): + query = "SELECT * FROM your_table LIMIT 10" # Example query + result = await execute_query_async(query) + print(result) + +# Run the async main function +if __name__ == '__main__': + asyncio.run(main()) +``` +##### Clickhouse-driver + +`clickhouse-driver` code is also synchronous and suffers the same problem as `clickhouse-connect` https://clickhouse-driver.readthedocs.io/en/latest/quickstart.html#async-and-multithreading + +So to use an asynchronous approach it is recommended to use a connection pool and some asyncio wrapper that can hide the complexity of using the `ThreadPoolExecutor/ProcessPoolExecutor` + +- To begin testing such an environment [aiohttp](https://docs.aiohttp.org/) is a good approach.
Here is an example: https://github.com/lesandie/clickhouse-tests/blob/main/scripts/test_aiohttp_inserts.py + This will simply use the requests module and aiohttp (you can tune the connection pooler https://docs.aiohttp.org/en/stable/client_advanced.html#limiting-connection-pool-size) + +- Also `aiochclient` is another good wrapper https://github.com/maximdanilchenko/aiochclient for the HTTP interface +- For the native interface you can try https://github.com/long2ice/asynch, `asynch` is an asyncio ClickHouse Python Driver with native (TCP) interface support, which reuses most of [clickhouse-driver](https://github.com/mymarilyn/clickhouse-driver) and complies with [PEP249](https://www.python.org/dev/peps/pep-0249/). diff --git a/content/en/altinity-kb-integrations/Spark.md b/content/en/altinity-kb-integrations/Spark.md index e37f065f58..b1813c40cd 100644 --- a/content/en/altinity-kb-integrations/Spark.md +++ b/content/en/altinity-kb-integrations/Spark.md @@ -1,13 +1,9 @@ --- -title: "ClickHouse + Spark" +title: "ClickHouse® + Spark" linkTitle: "Spark" weight: 100 -description: >- - Spark --- -## ClickHouse + Spark - ### jdbc The trivial & natural way to talk to ClickHouse from Spark is using jdbc. There are 2 jdbc drivers: @@ -16,7 +12,7 @@ The trivial & natural way to talk to ClickHouse from Spark is using jdbc. There ClickHouse-Native-JDBC has some hints about integration with Spark even in the main README file. -'Official' driver does support some conversion of complex data types (Roarring bitmaps) for Spark-Clickhouse integration: https://github.com/ClickHouse/clickhouse-jdbc/pull/596 +'Official' driver does support some conversion of complex data types (Roaring bitmaps) for Spark-ClickHouse integration: https://github.com/ClickHouse/clickhouse-jdbc/pull/596 But proper partitioning of the data (to spark partitions) may be tricky with jdbc.
@@ -54,21 +50,18 @@ Arrays, Higher-order functions, machine learning, integration with lot of differ ## More info + some unordered links (mostly in Chinese / Russian) -* Spark + ClickHouse: not a fight, but a symbiosis https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/spark_and_clickhouse.pdf (russian) -* Using a bunch of ClickHouse and Spark in MFI Soft https://www.youtube.com/watch?v=ID8eTnmag0s (russian) -* Spark read and write ClickHouse https://yerias.github.io/2020/12/08/clickhouse/9/#Jdbc%E6%93%8D%E4%BD%9Cclickhouse -* Spark reads and writes ClickHouse through jdbc https://blog.katastros.com/a?ID=01800-e40e1b3c-5fa4-4ea0-a3a8-f5e89ef0ce14 -* Spark JDBC write clickhouse operation summary https://www.jianshu.com/p/43f78c8a025b?hmsr=toutiao.io&utm_campaign=toutiao.io&utm_medium=toutiao.io&utm_source=toutiao.io https://toutiao.io/posts/m63yw89/preview -* Spark-sql is based on Clickhouse's DataSourceV2 data source extension (russian) +* Spark + ClickHouse: not a fight, but a symbiosis (Russian) https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/spark_and_clickhouse.pdf (russian) +* Using a bunch of ClickHouse and Spark in MFI Soft (Russian) https://www.youtube.com/watch?v=ID8eTnmag0s (russian) +* Spark read and write ClickHouse (Chinese: Spark读写ClickHouse) https://yerias.github.io/2020/12/08/clickhouse/9/#Jdbc%E6%93%8D%E4%BD%9Cclickhouse +* Spark JDBC write ClickHouse operation summary (Chinese: Spark JDBC 写 ClickHouse 操作总结) https://www.jianshu.com/p/43f78c8a025b?hmsr=toutiao.io&utm_campaign=toutiao.io&utm_medium=toutiao.io&utm_source=toutiao.io +* Spark-sql is based on ClickHouse's DataSourceV2 data source extension (Chinese: spark-sql基于ClickHouse的DataSourceV2数据源扩展) https://www.cnblogs.com/mengyao/p/4689866.html -* Alibaba integration instructions https://www.alibabacloud.com/help/doc-detail/191192.htm -* Tencent integration instructions https://intl.cloud.tencent.com/document/product/1026/35884 -* Yandex 
DataProc demo: loading files from S3 to ClickHouse with Spark https://www.youtube.com/watch?v=N3bZW0_rRzI -* Clickhouse official documentation_Spark JDBC writes some pits of ClickHouse https://blog.csdn.net/weixin_39615984/article/details/111206050 -* ClickHouse data import (Flink, Spark, Kafka, MySQL, Hive) https://zhuanlan.zhihu.com/p/299094269 -* Baifendian Big Data Technical Team: Practice of ClickHouse data synchronization solutionbased on multiple Spark tasks. https://www.6aiq.com/article/1635461873075 -* SPARK-CLICKHOUSE-ES REAL-TIME PROJECT EIGHTH DAY-PRECISE ONE-TIME CONSUMPTION SAVE OFFSET. https://www.freesion.com/article/71421322524/ -* Still struggling with real-time data warehouse selection, Spark + ClickHouse makes yoamazing! https://dbaplus.cn/news-73-3806-1.html -* HDFS+ClickHouse+Spark: A lightweight big data analysis system from 0 to 1. https://juejin.cn/post/6850418114962653198 -* ClickHouse Clustering for Spark Developer http://blog.madhukaraphatak.com/clickouse-clustering-spark-developer/ -* «Иногда приходится заглядывать в код Spark»: Александр Морозов (SEMrush) об использовании Scala, Spark и ClickHouse. https://habr.com/ru/company/jugru/blog/341288/ +* Alibaba integration instructions (English) https://www.alibabacloud.com/help/doc-detail/191192.htm +* Tencent integration instructions (English) https://intl.cloud.tencent.com/document/product/1026/35884 +* Yandex DataProc demo: loading files from S3 to ClickHouse with Spark (Russian) https://www.youtube.com/watch?v=N3bZW0_rRzI +* ClickHouse official documentation_Spark JDBC writes some pits of ClickHouse (Chinese: ClickHouse官方文档_Spark JDBC写ClickHouse的一些坑) https://blog.csdn.net/weixin_39615984/article/details/111206050 +* ClickHouse data import: Flink, Spark, Kafka, MySQL, Hive (Chinese: 篇五|ClickHouse数据导入 Flink、Spark、Kafka、MySQL、Hive) https://zhuanlan.zhihu.com/p/299094269 +* SPARK-CLICKHOUSE-ES REAL-TIME PROJECT EIGHTH DAY-PRECISE ONE-TIME CONSUMPTION SAVE OFFSET. 
(Chinese: SPARK-CLICKHOUSE-ES实时项目第八天-精确一次性消费保存偏移量) https://www.freesion.com/article/71421322524/ +* HDFS+ClickHouse+Spark: A lightweight big data analysis system from 0 to 1. (Chinese: HDFS+ClickHouse+Spark:从0到1实现一款轻量级大数据分析系统) https://juejin.cn/post/6850418114962653198 +* ClickHouse Clustering for Spark Developer (English) http://blog.madhukaraphatak.com/clickouse-clustering-spark-developer/ +* «Иногда приходится заглядывать в код Spark»: Александр Морозов (SEMrush) об использовании Scala, Spark и ClickHouse. (Russian) https://habr.com/ru/company/jugru/blog/341288/ diff --git a/content/en/altinity-kb-integrations/_index.md b/content/en/altinity-kb-integrations/_index.md index dc1cd483d1..6951848f56 100644 --- a/content/en/altinity-kb-integrations/_index.md +++ b/content/en/altinity-kb-integrations/_index.md @@ -6,6 +6,6 @@ keywords: - clickhouse bi - clickhouse kafka description: > - Learn how you can integrate cloud services, BI tools, kafka, MySQL, Spark, MindsDB, and more with ClickHouse. + Learn how you can integrate cloud services, BI tools, kafka, MySQL, Spark, MindsDB, and more with ClickHouse® weight: 4 --- diff --git a/content/en/altinity-kb-integrations/altinity-cloud/_index.md b/content/en/altinity-kb-integrations/altinity-cloud/_index.md index fc03237ffc..94f729920e 100644 --- a/content/en/altinity-kb-integrations/altinity-cloud/_index.md +++ b/content/en/altinity-kb-integrations/altinity-cloud/_index.md @@ -1,7 +1,81 @@ --- -title: "Cloud Services" -linkTitle: "Cloud Services" +title: "Altinity Cloud Access Management" +linkTitle: "Altinity Cloud Access Management" description: > - Tips and tricks for using ClickHouse with different cloud services. -weight: 4 + Enabling access_management for Altinity.Cloud databases. +weight: 5 +alias: /altinity-kb-integrations/altinity-cloud --- +Organizations that want to enable administrative users in their Altinity.Cloud ClickHouse® servers can do so by enabling `access_management` manually. 
This allows for administrative users to be created on the specific ClickHouse Cluster. + +{{% alert title="WARNING" color="warning" %}} +Modifying the ClickHouse cluster settings manually can lead to the cluster not loading or other issues. Change settings only with full consultation with an Altinity.Cloud support team member, and be ready to remove settings if they cause any disruption of service. +{{% /alert %}} + +To add the `access_management` setting to an Altinity.Cloud ClickHouse Cluster: + +1. Log into your Altinity.Cloud account. +1. For the cluster to modify, select **Configure -> Settings**. + + {{< figure src="/assets/altinity-cloud-cluster-settings-configure.png" width="400" title="Cluster setting configure" >}} + +1. From the Settings page, select **+ADD SETTING**. + + {{< figure src="/assets/altinity-cloud-cluster-add-setting.png" title="Add cluster setting" >}} + +1. Set the following options: + 1. **Setting Type**: Select **users.d file**. + 1. **Filename**: `access_management.xml` + 1. **Contents**: Enter the following to allow the `clickhouse_operator` that controls the cluster through the `clickhouse-operator` the ability to set administrative options: + + ```xml + + + + 1 + + + 1 + + + + ``` + + access_management=1 means that users `admin`, `clickhouse_operator` are able to create users and grant them privileges using SQL. + +1. Select **OK**. The cluster will restart, and users can now be created in the cluster that can be granted administrative access. + +1. If you are running ClickHouse 21.9 and above you can enable storing access management in ZooKeeper. in this case it will be automatically propagated to the cluster. This requires yet another configuration file: + 1. **Setting Type**: Select **config.d file** + 2. **Filename**: `user_directories.xml` + 3. 
**Contents**: + + ```xml + + + + /etc/clickhouse-server/users.xml + + + /clickhouse/access/ + + + /var/lib/clickhouse/access/ + + + + ``` + +[//]: # (---) + +[//]: # (title: "Cloud Services") + +[//]: # (linkTitle: "Cloud Services") + +[//]: # (description: >) + +[//]: # ( Tips and tricks for using ClickHouse® with different cloud services.) + +[//]: # (weight: 4) + +[//]: # (---) diff --git a/content/en/altinity-kb-integrations/altinity-cloud/altinity-cloud-access-management.md b/content/en/altinity-kb-integrations/altinity-cloud/altinity-cloud-access-management.md index 9803007c8f..b34ea1e85c 100644 --- a/content/en/altinity-kb-integrations/altinity-cloud/altinity-cloud-access-management.md +++ b/content/en/altinity-kb-integrations/altinity-cloud/altinity-cloud-access-management.md @@ -4,8 +4,10 @@ linkTitle: "Altinity Cloud Access Management" description: > Enabling access_management for Altinity.Cloud databases. weight: 5 +alias: /altinity-kb-integrations/altinity-cloud +draft: true --- -Organizations that want to enable administrative users in their Altinity.Cloud ClickHouse servers can do so by enabling `access_management` manually. This allows for administrative users to be created on the specific ClickHouse Cluster. +Organizations that want to enable administrative users in their Altinity.Cloud ClickHouse® servers can do so by enabling `access_management` manually. This allows for administrative users to be created on the specific ClickHouse Cluster. {{% alert title="WARNING" color="warning" %}} Modifying the ClickHouse cluster settings manually can lead to the cluster not loading or other issues. Change settings only with full consultation with an Altinity.Cloud support team member, and be ready to remove settings if they cause any disruption of service. @@ -28,7 +30,7 @@ To add the `access_management` setting to an Altinity.Cloud ClickHouse Cluster: 1. 
**Contents**: Enter the following to allow the `clickhouse_operator` that controls the cluster through the `clickhouse-operator` the ability to set administrative options: ```xml - + 1 @@ -37,7 +39,7 @@ To add the `access_management` setting to an Altinity.Cloud ClickHouse Cluster: 1 - + ``` access_management=1 means that users `admin`, `clickhouse_operator` are able to create users and grant them privileges using SQL. @@ -50,7 +52,7 @@ To add the `access_management` setting to an Altinity.Cloud ClickHouse Cluster: 3. **Contents**: ```xml - + /etc/clickhouse-server/users.xml @@ -58,6 +60,9 @@ To add the `access_management` setting to an Altinity.Cloud ClickHouse Cluster: /clickhouse/access/ + + /var/lib/clickhouse/access/ + - + ``` diff --git a/content/en/altinity-kb-integrations/altinity-kb-google-s3-gcs.md b/content/en/altinity-kb-integrations/altinity-kb-google-s3-gcs.md index b0fa975f78..eb1f5b99c1 100644 --- a/content/en/altinity-kb-integrations/altinity-kb-google-s3-gcs.md +++ b/content/en/altinity-kb-integrations/altinity-kb-google-s3-gcs.md @@ -1,9 +1,8 @@ --- title: "Google S3 (GCS)" linkTitle: "Google S3 (GCS)" -description: > - "Google S3 GCS" --- + GCS with the table function - seems to work correctly for simple scenarios. Essentially you can follow the steps from the [Migrating from Amazon S3 to Cloud Storage](https://cloud.google.com/storage/docs/aws-simple-migration). @@ -11,8 +10,5 @@ Essentially you can follow the steps from the [Migrating from Amazon S3 to Cloud 1. Set up a GCS bucket. 2. This bucket must be set as part of the default project for the account. This configuration can be found in settings -> interoperability. 3. Generate a HMAC key for the account, can be done in settings -> interoperability, in the section for user account access keys. -4. In ClickHouse, replace the S3 bucket endpoint with the GCS bucket endpoint This must be done with the path-style GCS endpoint: `https://storage.googleapis.com/BUCKET_NAME/OBJECT_NAME`. +4. 
In ClickHouse®, replace the S3 bucket endpoint with the GCS bucket endpoint This must be done with the path-style GCS endpoint: `https://storage.googleapis.com/BUCKET_NAME/OBJECT_NAME`. 5. Replace the aws access key id and aws secret access key with the corresponding parts of the HMAC key. - - -s3 Disk on the top of GCS and writing to GSC may be NOT working because GCS don't support some of bulk S3 API calls, see https://github.com/ClickHouse/ClickHouse/issues/24246 diff --git a/content/en/altinity-kb-integrations/altinity-kb-kafka/_index.md b/content/en/altinity-kb-integrations/altinity-kb-kafka/_index.md index 4a6de0c312..8d663c25f2 100644 --- a/content/en/altinity-kb-integrations/altinity-kb-kafka/_index.md +++ b/content/en/altinity-kb-integrations/altinity-kb-kafka/_index.md @@ -8,8 +8,9 @@ description: > git log -- contrib/librdkafka | git name-rev --stdin ``` -| **ClickHouse version** | **librdkafka version** | +| **ClickHouse® version** | **librdkafka version** | | :--- | :--- | +| 25.3+ ([\#63697](https://github.com/ClickHouse/ClickHouse/issues/63697)) | [2.8.0](https://github.com/confluentinc/librdkafka/blob/v2.8.0/CHANGELOG.md) + few [fixes](https://gist.github.com/filimonov/ad252aa601d4d99fb57d4d76f14aa2bf) | | 21.10+ ([\#27883](https://github.com/ClickHouse/ClickHouse/pull/27883)) | [1.6.1](https://github.com/edenhill/librdkafka/blob/v1.6.1/CHANGELOG.md) + snappy fixes + boring ssl + illumos_build fixes + edenhill#3279 fix| | 21.6+ ([\#23874](https://github.com/ClickHouse/ClickHouse/pull/23874)) | [1.6.1](https://github.com/edenhill/librdkafka/blob/v1.6.1/CHANGELOG.md) + snappy fixes + boring ssl + illumos_build fixes| | 21.1+ ([\#18671](https://github.com/ClickHouse/ClickHouse/pull/18671)) | [1.6.0-RC3](https://github.com/edenhill/librdkafka/blob/v1.6.0-RC3/CHANGELOG.md) + snappy fixes + boring ssl | diff --git a/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-adjusting-librdkafka-settings.md 
b/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-adjusting-librdkafka-settings.md index 0a693d0648..716bab1e5e 100644 --- a/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-adjusting-librdkafka-settings.md +++ b/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-adjusting-librdkafka-settings.md @@ -7,42 +7,51 @@ description: > * To set rdkafka options - add to `` section in `config.xml` or preferably use a separate file in `config.d/`: * [https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) -Some random example: +Some random example using SSL certificates to authenticate: ```xml - - 60000 - 60000 - 10000 - 5000 - 60000 - 20000 - 500 - 20971520 - all - SSL - /etc/clickhouse-server/ssl/kafka-ca-qa.crt - /etc/clickhouse-server/ssl/client_clickhouse_client.pem - /etc/clickhouse-server/ssl/client_clickhouse_client.key - pass - + + + 60000 + 60000 + 10000 + 5000 + 60000 + 20000 + 500 + 20971520 + all + SSL + /etc/clickhouse-server/ssl/kafka-ca-qa.crt + /etc/clickhouse-server/ssl/client_clickhouse_client.pem + /etc/clickhouse-server/ssl/client_clickhouse_client.key + pass + + ``` ## Authentication / connectivity -### Amazon MSK +Sometimes the consumer group needs to be explicitly allowed in the broker UI config. 
+ +### Amazon MSK | SASL/SCRAM ```xml sasl_ssl + + root toor ``` +- [Broker ports detail](https://docs.aws.amazon.com/msk/latest/developerguide/port-info.html) +- [Read here more](https://leftjoin.ru/blog/data-engineering/clickhouse-as-a-consumer-to-amazon-msk/) (Russian language) + -### SASL/SCRAM +### on-prem / self-hosted Kafka broker ```xml @@ -51,17 +60,18 @@ Some random example: SCRAM-SHA-512 root toor + + /path/to/cert/fullchain.pem ``` -[https://leftjoin.ru/all/clickhouse-as-a-consumer-to-amazon-msk/](https://leftjoin.ru/all/clickhouse-as-a-consumer-to-amazon-msk/) ### Inline Kafka certs To connect to some Kafka cloud services you may need to use certificates. -If needed they can be converted to pem format and inlined into ClickHouse config.xml +If needed they can be converted to pem format and inlined into ClickHouse® config.xml Example: ```xml @@ -81,11 +91,11 @@ Example: ``` -See xml +See -[https://help.aiven.io/en/articles/489572-getting-started-with-aiven-kafka](https://help.aiven.io/en/articles/489572-getting-started-with-aiven-kafka) +- [https://help.aiven.io/en/articles/489572-getting-started-with-aiven-kafka](https://help.aiven.io/en/articles/489572-getting-started-with-aiven-kafka) -[https://stackoverflow.com/questions/991758/how-to-get-pem-file-from-key-and-crt-files](https://stackoverflow.com/questions/991758/how-to-get-pem-file-from-key-and-crt-files) +- [https://stackoverflow.com/questions/991758/how-to-get-pem-file-from-key-and-crt-files](https://stackoverflow.com/questions/991758/how-to-get-pem-file-from-key-and-crt-files) ### Azure Event Hub @@ -105,33 +115,32 @@ See [https://github.com/ClickHouse/ClickHouse/issues/12609](https://github.com/C ``` -### confluent cloud +### Confluent Cloud / Google Cloud ```xml - - - smallest - SASL_SSL - https - PLAIN -xmlusername - password - probe - - - + + + smallest + SASL_SSL + + + PLAIN + username + password + + + + ``` - 
-[https://docs.confluent.io/cloud/current/client-apps/config-client.html](https://docs.confluent.io/cloud/current/client-apps/config-client.html) +- [https://docs.confluent.io/cloud/current/client-apps/config-client.html](https://docs.confluent.io/cloud/current/client-apps/config-client.html) +- [https://cloud.google.com/managed-service-for-apache-kafka/docs/authentication-kafka](https://cloud.google.com/managed-service-for-apache-kafka/docs/authentication-kafka) ## How to test connection settings -Use kafkacat utility - it internally uses same library to access Kafla as clickhouse itself and allows easily to test different settings. +Use kafkacat utility - it internally uses same library to access Kafla as ClickHouse itself and allows easily to test different settings. ```bash -kafkacat -b my_broker:9092 -C -o -10 -t my_topic \ +kafkacat -b my_broker:9092 -C -o -10 -t my_topic \ (Google cloud and on-prem use 9092 port) -X security.protocol=SASL_SSL \ -X sasl.mechanisms=PLAIN \ -X sasl.username=uerName \ @@ -139,7 +148,7 @@ kafkacat -b my_broker:9092 -C -o -10 -t my_topic \ ``` -# Different configurations for different tables? +## Different configurations for different tables? > Is there some more documentation how to use this multiconfiguration for Kafka ? @@ -148,7 +157,7 @@ https://github.com/ClickHouse/ClickHouse/blob/da4856a2be035260708fe2ba3ffb9e437d So it load the main config first, after that it load (with overwrites) the configs for all topics, **listed in `kafka_topic_list` of the table**. -Also since v21.12 it's possible to use more straght-forward way using named_collections: +Also since v21.12 it's possible to use more straightforward way using named_collections: https://github.com/ClickHouse/ClickHouse/pull/31691 So you can say something like @@ -169,7 +178,28 @@ And after that in configuration: + + + + + + ... + foo.bar + foo.bar.group + + ... + ... + ... + ... 
+ smallest + https + probe + + + + + ``` The same fragment of code in newer versions: -https://github.com/ClickHouse/ClickHouse/blob/d19e24f530c30f002488bc136da78f5fb55aedab/src/Storages/Kafka/StorageKafka.cpp#L474-L496 +- https://github.com/ClickHouse/ClickHouse/blob/d19e24f530c30f002488bc136da78f5fb55aedab/src/Storages/Kafka/StorageKafka.cpp#L474-L496 diff --git a/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-exactly-once-semantics.md b/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-exactly-once-semantics.md index 3432db9fd5..fa54523134 100644 --- a/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-exactly-once-semantics.md +++ b/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-exactly-once-semantics.md @@ -4,7 +4,7 @@ linkTitle: "Exactly once semantics" description: > Exactly once semantics --- -EOS consumer (isolation.level=read_committed) is enabled by default since librdkafka 1.2.0, so for ClickHouse - since 20.2 +EOS consumer (isolation.level=read_committed) is enabled by default since librdkafka 1.2.0, so for ClickHouse® - since 20.2 See: @@ -18,6 +18,6 @@ We need to have something like transactions on ClickHouse side to be able to avo ## block-aggregator by eBay -Block Aggregator is a data loader that subscribes to Kafka topics, aggregates the Kafka messages into blocks that follow the Clickhouse’s table schemas, and then inserts the blocks into ClickHouse. Block Aggregator provides exactly-once delivery guarantee to load data from Kafka to ClickHouse. Block Aggregator utilizes Kafka’s metadata to keep track of blocks that are intended to send to ClickHouse, and later uses this metadata information to deterministically re-produce ClickHouse blocks for re-tries in case of failures. The identical blocks are guaranteed to be deduplicated by ClickHouse. 
+Block Aggregator is a data loader that subscribes to Kafka topics, aggregates the Kafka messages into blocks that follow the ClickHouse’s table schemas, and then inserts the blocks into ClickHouse. Block Aggregator provides exactly-once delivery guarantee to load data from Kafka to ClickHouse. Block Aggregator utilizes Kafka’s metadata to keep track of blocks that are intended to send to ClickHouse, and later uses this metadata information to deterministically re-produce ClickHouse blocks for re-tries in case of failures. The identical blocks are guaranteed to be deduplicated by ClickHouse. [eBay/block-aggregator](https://github.com/eBay/block-aggregator) diff --git a/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-kafka-main-parsing-loop.md b/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-kafka-main-parsing-loop.md index 2b9bd975f9..74eeab4326 100644 --- a/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-kafka-main-parsing-loop.md +++ b/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-kafka-main-parsing-loop.md @@ -4,7 +4,7 @@ linkTitle: "Kafka main parsing loop" description: > Kafka main parsing loop --- -One of the threads from scheduled_pool (pre 20.9) / `background_message_broker_schedule_pool` (after 20.9) do that in infinite loop: +One of the threads from scheduled_pool (pre ClickHouse® 20.9) / `background_message_broker_schedule_pool` (after 20.9) do that in infinite loop: 1. Batch poll (time limit: `kafka_poll_timeout_ms` 500ms, messages limit: `kafka_poll_max_batch_size` 65536) 2. Parse messages. @@ -31,3 +31,8 @@ You may want to adjust those depending on your scenario: ## See also [https://github.com/ClickHouse/ClickHouse/pull/11388](https://github.com/ClickHouse/ClickHouse/pull/11388) + +## Disable at-least-once delivery + +`kafka_commit_every_batch` = 1 will change the loop logic mentioned above. 
Consumed batch committed to the Kafka and the block of rows send to Materialized Views only after that. It could be resembled as at-most-once delivery mode as prevent duplicate creation but allow loss of data in case of failures. + diff --git a/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-kafka-mv-consuming.md b/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-kafka-mv-consuming.md new file mode 100644 index 0000000000..50c4b2ca6f --- /dev/null +++ b/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-kafka-mv-consuming.md @@ -0,0 +1,120 @@ +--- +title: "Multiple MVs attached to Kafka table" +linkTitle: "Multiple MVs attached to Kafka table" +description: > + How Multiple MVs attached to Kafka table consume and how they are affected by kafka_num_consumers/kafka_thread_per_consumer +--- + +Kafka Consumer is a thread inside the Kafka Engine table that is visible by Kafka monitoring tools like kafka-consumer-groups and in Clickhouse in system.kafka_consumers table. + +Having multiple consumers increases ingesting parallelism and can significantly speed up event processing. However, it comes with a trade-off: it's a CPU-intensive task, especially under high event load and/or complicated parsing of incoming data. Therefore, it's crucial to create as many consumers as you really need and ensure you have enough CPU cores to handle them. We don’t recommend creating too many Kafka Engines per server because it could lead to uncontrolled CPU usage in situations like bulk data upload or catching up a huge kafka lag due to excessive parallelism of the ingesting process. + +## kafka_thread_per_consumer meaning + +Consider a basic pipeline depicted as a Kafka table with 2 MVs attached. The Kafka broker has 2 topics and 4 partitions. + +### kafka_thread_per_consumer = 0 + +Kafka engine table will act as 2 consumers, but only 1 insert thread for both of them. 
It is important to note that the topic needs to have as many partitions as consumers. For this scenario, we use these settings: + +``` +kafka_num_consumers = 2 +kafka_thread_per_consumer = 0 +``` + +The same Kafka engine will create 2 streams, 1 for each consumer, and will join them in a union stream. And it will use 1 thread for inserting `[ 2385 ]` +This is how we can see it in the logs: + +```log +2022.11.09 17:49:34.282077 [ 2385 ] {} StorageKafka (kafka_table): Started streaming to 2 attached views +``` + +* How ClickHouse® calculates the number of threads depending on the `thread_per_consumer` setting: + + ```c++ + auto stream_count = thread_per_consumer ? 1 : num_created_consumers; + sources.reserve(stream_count); + pipes.reserve(stream_count); + for (size_t i = 0; i < stream_count; ++i) + { + ...... + } + ``` + +Details: + +https://github.com/ClickHouse/ClickHouse/blob/1b49463bd297ade7472abffbc931c4bb9bf213d0/src/Storages/Kafka/StorageKafka.cpp#L834 + + +Also, a detailed graph of the pipeline: + +![thread_per_consumer0](/assets/thread_per_consumer0.png) + +With this approach, even if the number of consumers increased, the Kafka engine will still use only 1 thread to flush. The consuming/processing rate will probably increase a bit, but not linearly. For example, 5 consumers will not consume 5 times faster. Also, a good property of this approach is the `linearization` of INSERTS, which means that the order of the inserts is preserved and sequential. This option is good for small/medium Kafka topics. + + +### kafka_thread_per_consumer = 1 + +Kafka engine table will act as 2 consumers and 1 thread per consumer. 
For this scenario, we use these settings: + +``` +kafka_num_consumers = 2 +kafka_thread_per_consumer = 1 +``` + +Here, the pipeline works like this: + +![thread_per_consumer1](/assets/thread_per_consumer1.png) + + +With this approach, the number of consumers remains the same, but each consumer will use their own insert/flush thread, and the consuming/processing rate should increase. + +## Background Pool + +In Clickhouse there is a special thread pool for background processes, such as streaming engines. Its size is controlled by the background_message_broker_schedule_pool_size setting and is 16 by default. If you exceed this limit across all tables on the server, you’ll likely encounter continuous Kafka rebalances, which will slow down processing considerably. For a server with a lot of CPU cores, you can increase that limit to a higher value, like 20 or even 40. `background_message_broker_schedule_pool_size` = 20 allows you to create 5 Kafka Engine tables with 4 consumers each of them has its own insert thread. This option is good for large Kafka topics with millions of messages per second. + + +## Multiple Materialized Views + +Attaching multiple Materialized Views (MVs) to a Kafka Engine table can be used when you need to apply different transformations to the same topic and store the resulting data in different tables. + +(This approach also applies to the other streaming engines - RabbitMQ, s3queue, etc). + +All streaming engines begin processing data (reading from the source and producing insert blocks) only after at least one Materialized View is attached to the engine. Multiple Materialized Views can be connected to distribute data across various tables with different transformations. But how does it work when the server starts? 
+ +Once the first Materialized View (MV) is loaded, started, and attached to the Kafka/s3queue table, data consumption begins immediately—data is read from the source, pushed to the destination, and the pointers advance to the next position. However, any other MVs that haven't started yet will miss the data consumed by the first MV, leading to some data loss. + +This issue worsens with asynchronous table loading. Tables are only loaded upon first access, and the loading process takes time. When multiple MVs direct the data stream to different tables, some tables might be ready sooner than others. As soon as the first table becomes ready, data consumption starts, and any tables still loading will miss the data consumed during that interval, resulting in further data loss for those tables. + + +That means when you make a design with Multiple MVs `async_load_databases` should be switched off: + +```sql +false +``` + +Also, you have to prevent starting to consume until all MVs are loaded and started. For that, you can add an additional Null table to the MV pipeline, so the Kafka table will pass the block to a single Null table first, and only then many MVs start their own transformations to many dest tables: + + KafkaTable → dummy_MV -> NullTable -> [MV1, MV2, ….] → [Table1, Table2, …] + +```sql +create table NullTable Engine=Null as KafkaTable; +create materialized view dummy_MV to NullTable +select * from KafkaTable +--WHERE NOT ignore(throwIf(if((uptime() < 120), 1 , 0))) +WHERE NOT ignore(throwIf(if((uptime() < 120), 1 + sleep(3), 0))) +``` + +120 seconds should be enough for loading all MVs. 
+ +Using an intermediate Null table is also preferable because it's easier to make any changes with MVs: + +- drop the dummy_MV to stop consuming +- make any changes to transforming MVs by drop/recreate +- create dummy_MV again to resume consuming + +The fix for correctly starting multiple MVs will be available from 25.5 version - https://github.com/ClickHouse/ClickHouse/pull/72123 + + + + diff --git a/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-kafka-parallel-consuming.md b/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-kafka-parallel-consuming.md index 53c06a8972..4f7b62d5d3 100644 --- a/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-kafka-parallel-consuming.md +++ b/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-kafka-parallel-consuming.md @@ -4,7 +4,7 @@ linkTitle: "Kafka parallel consuming" description: > Kafka parallel consuming --- -For very large topics when you need more parallelism (especially on the insert side) you may use several tables with the same pipeline (pre 20.9) or enable `kafka_thread_per_consumer` (after 20.9). +For very large topics when you need more parallelism (especially on the insert side) you may use several tables with the same pipeline (pre ClickHouse® 20.9) or enable `kafka_thread_per_consumer` (after 20.9). ```ini kafka_num_consumers = N, @@ -15,5 +15,7 @@ Notes: * the inserts will happen in parallel (without that setting inserts happen linearly) * enough partitions are needed. +* `kafka_num_consumers` is limited by number of physical cores (half of vCPUs). `kafka_disable_num_consumers_limit` can be used to override the limit. 
+* `background_message_broker_schedule_pool_size` is 16 by default, you may need to increase if using more than 16 consumers Before increasing `kafka_num_consumers` with keeping `kafka_thread_per_consumer=0` may improve consumption & parsing speed, but flushing & committing still happens by a single thread there (so inserts are linear). diff --git a/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-rewind-fast-forward-replay.md b/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-rewind-fast-forward-replay.md index a2dae43a0d..cc655531d7 100644 --- a/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-rewind-fast-forward-replay.md +++ b/content/en/altinity-kb-integrations/altinity-kb-kafka/altinity-kb-rewind-fast-forward-replay.md @@ -4,10 +4,16 @@ linkTitle: "Rewind / fast-forward / replay" description: > Rewind / fast-forward / replay --- -* Step 1: Detach Kafka tables in ClickHouse +* Step 1: Detach Kafka tables in ClickHouse® + ``` + DETACH TABLE db.kafka_table_name ON CLUSTER '{cluster}'; + ``` * Step 2: `kafka-consumer-groups.sh --bootstrap-server kafka:9092 --topic topic:0,1,2 --group id1 --reset-offsets --to-latest --execute` * More samples: [https://gist.github.com/filimonov/1646259d18b911d7a1e8745d6411c0cc](https://gist.github.com/filimonov/1646259d18b911d7a1e8745d6411c0cc) -* Step: Attach Kafka tables back +* Step 3: Attach Kafka tables back + ``` + ATTACH TABLE db.kafka_table_name ON CLUSTER '{cluster}'; + ``` See also these configuration settings: @@ -16,3 +22,13 @@ See also these configuration settings: smallest ``` +### About Offset Consuming + +When a consumer joins the consumer group, the broker will check if it has a committed offset. If that is the case, then it will start from the latest offset. 
Both ClickHouse and librdKafka documentation state that the default value for `auto_offset_reset` is largest (or `latest` in new Kafka versions) but it is not, if the consumer is new: + +https://github.com/ClickHouse/ClickHouse/blob/f171ad93bcb903e636c9f38812b6aaf0ab045b04/src/Storages/Kafka/StorageKafka.cpp#L506 + +  `conf.set("auto.offset.reset", "earliest");     // If no offset stored for this group, read all messages from the start` + +If there is no offset stored or it is out of range, for that particular consumer group, the consumer will start consuming from the beginning (`earliest`), and if there is some offset stored then it should use the `latest`. +The log retention policy influences which offset values correspond to the `earliest` and `latest` configurations. Consider a scenario where a topic has a retention policy set to 1 hour. Initially, you produce 5 messages, and then, after an hour, you publish 5 more messages. In this case, the latest offset will remain unchanged from the previous example. However, due to Kafka removing the earlier messages, the earliest available offset will not be 0; instead, it will be 5. diff --git a/content/en/altinity-kb-integrations/altinity-kb-kafka/background_message_broker_schedule_pool_size.md b/content/en/altinity-kb-integrations/altinity-kb-kafka/background_message_broker_schedule_pool_size.md new file mode 100644 index 0000000000..108fc13992 --- /dev/null +++ b/content/en/altinity-kb-integrations/altinity-kb-kafka/background_message_broker_schedule_pool_size.md @@ -0,0 +1,131 @@ +--- +title: "Setting the background message broker schedule pool size" +linkTitle: "Setting the background message broker schedule pool size" +weight: 100 +description: >- + Guide to managing the `background_message_broker_schedule_pool_size` setting for Kafka, RabbitMQ, and NATS table engines in your database. 
+--- + +## Overview + +When using Kafka, RabbitMQ, or NATS table engines in ClickHouse®, you may encounter issues related to a saturated background thread pool. One common symptom is a warning similar to the following: + +``` +2025.03.14 08:44:26.725868 [ 344 ] {} StorageKafka (events_kafka): [rdk:MAXPOLL] [thrd:main]: Application maximum poll interval (60000ms) exceeded by 159ms (adjust max.poll.interval.ms for long-running message processing): leaving group +``` + +This warning typically appears **not because ClickHouse fails to poll**, but because **there are no available threads** in the background pool to handle the polling in time. In rare cases, the same error might also be caused by long flushing operations to Materialized Views (MVs), especially if their logic is complex or chained. + +To resolve this, you should monitor and, if needed, increase the value of the `background_message_broker_schedule_pool_size` setting. + +--- + +## Step 1: Check Thread Pool Utilization + +Run the following SQL query to inspect the current status of your background message broker thread pool: + +```sql +SELECT + ( + SELECT value + FROM system.metrics + WHERE metric = 'BackgroundMessageBrokerSchedulePoolTask' + ) AS tasks, + ( + SELECT value + FROM system.metrics + WHERE metric = 'BackgroundMessageBrokerSchedulePoolSize' + ) AS pool_size, + pool_size - tasks AS free_threads +``` + +If you have `metric_log` enabled, you can also monitor the **minimum number of free threads over the day**: + +```sql +SELECT min(CurrentMetric_BackgroundMessageBrokerSchedulePoolSize - CurrentMetric_BackgroundMessageBrokerSchedulePoolTask) AS min_free_threads +FROM system.metric_log +WHERE event_date = today() +``` + +**If `free_threads` is close to zero or negative**, it means your thread pool is saturated and should be increased. 
+ +--- + +## Step 2: Estimate Required Pool Size + +To estimate a reasonable value for `background_message_broker_schedule_pool_size`, run the following query: + +```sql +WITH + toUInt32OrDefault(extract(engine_full, 'kafka_num_consumers\s*=\s*(\d+)')) as kafka_num_consumers, + extract(engine_full, 'kafka_thread_per_consumer\s*=\s*(\d+|\'true\')') not in ('', '0') as kafka_thread_per_consumer, + multiIf( + engine = 'Kafka', + if(kafka_thread_per_consumer AND kafka_num_consumers > 0, kafka_num_consumers, 1), + engine = 'RabbitMQ', + 3, + engine = 'NATS', + 3, + 0 /* should not happen */ + ) as threads_needed +SELECT + ceil(sum(threads_needed) * 1.25) +FROM + system.tables +WHERE + engine in ('Kafka', 'RabbitMQ', 'NATS') +``` + +This will return an estimate that includes a 25% buffer to accommodate spikes in load. + +--- + +## Step 3: Apply the New Setting + +1. **Create or update** the following configuration file: + + **Path:** `/etc/clickhouse-server/config.d/background_message_broker_schedule_pool_size.xml` + + **Content:** + ```xml + + 120 + + ``` + + Replace `120` with the value recommended from Step 2 (rounded up if needed). + +2. **(Only for ClickHouse versions 23.8 and older)** + + Add the same setting to the default user profile: + + **Path:** `/etc/clickhouse-server/users.d/background_message_broker_schedule_pool_size.xml` + + **Content:** + ```xml + + + + 120 + + + + ``` + +--- + +## Step 4: Restart ClickHouse + +After applying the configuration, restart ClickHouse to apply the changes: + +```bash +sudo systemctl restart clickhouse-server +``` + +--- + +## Summary + +A saturated background message broker thread pool can lead to missed Kafka polls and consumer group dropouts. Monitoring your metrics and adjusting `background_message_broker_schedule_pool_size` accordingly ensures stable operation of Kafka, RabbitMQ, and NATS integrations. 
+ +If the problem persists even after increasing the pool size, consider investigating slow MV chains or flushing logic as a potential bottleneck. diff --git a/content/en/altinity-kb-integrations/altinity-kb-kafka/error-handling.md b/content/en/altinity-kb-integrations/altinity-kb-kafka/error-handling.md index e9e2f533de..b2ac0fef88 100644 --- a/content/en/altinity-kb-integrations/altinity-kb-kafka/error-handling.md +++ b/content/en/altinity-kb-integrations/altinity-kb-kafka/error-handling.md @@ -14,7 +14,7 @@ It's also possible to skip up to N malformed messages for each block, with used ## After 21.6 -It's possible to stream messages which could not be parsed, this behavior could be enabled via setting: `kafka_handle_error_mode='stream'` and clickhouse wil write error and message from Kafka itself to two new virtual columns: `_error, _raw_message`. +It's possible to stream messages which could not be parsed, this behavior could be enabled via setting: `kafka_handle_error_mode='stream'` and ClickHouse® wil write error and message from Kafka itself to two new virtual columns: `_error, _raw_message`. So you can create another Materialized View which would collect to a separate table all errors happening while parsing with all important information like offset and content of message. 
@@ -31,7 +31,7 @@ kafka_group_name = 'clickhouse', kafka_format = 'JSONEachRow', kafka_handle_error_mode='stream'; -CREATE MATERIALIZED VIEW default.kafka_errors +CREATE TABLE default.kafka_errors ( `topic` String, `partition` Int64, @@ -41,7 +41,11 @@ CREATE MATERIALIZED VIEW default.kafka_errors ) ENGINE = MergeTree ORDER BY (topic, partition, offset) -SETTINGS index_granularity = 8192 AS +SETTINGS index_granularity = 8192 + + +CREATE MATERIALIZED VIEW default.kafka_errors_mv TO default.kafka_errors +AS SELECT _topic AS topic, _partition AS partition, @@ -52,9 +56,15 @@ FROM default.kafka_engine WHERE length(_error) > 0 ``` +## Since 25.8 + +dead letter queue can be used via setting: `kafka_handle_error_mode='dead_letter'` [https://github.com/ClickHouse/ClickHouse/pull/68873](https://github.com/ClickHouse/ClickHouse/pull/68873) + + + ![Table connections](/assets/Untitled-2021-08-05-1027.png) -[https://github.com/ClickHouse/ClickHouse/pull/20249\#issuecomment-779054737](https://github.com/ClickHouse/ClickHouse/pull/20249\#issuecomment-779054737) +[https://github.com/ClickHouse/ClickHouse/pull/20249](https://github.com/ClickHouse/ClickHouse/pull/20249) [https://github.com/ClickHouse/ClickHouse/pull/21850](https://github.com/ClickHouse/ClickHouse/pull/21850) diff --git a/content/en/altinity-kb-integrations/altinity-kb-kafka/kafka-schema-inference.md b/content/en/altinity-kb-integrations/altinity-kb-kafka/kafka-schema-inference.md new file mode 100644 index 0000000000..a47e65a6c9 --- /dev/null +++ b/content/en/altinity-kb-integrations/altinity-kb-kafka/kafka-schema-inference.md @@ -0,0 +1,88 @@ +--- +title: "Inferring Schema from AvroConfluent Messages in Kafka for ClickHouse®" +linkTitle: "Schema Inference for Kafka" +weight: 100 +description: >- + Learn how to define Kafka table structures in ClickHouse® by using Avro's schema registry & sample message. 
+--- + +To consume messages from Kafka within ClickHouse®, you need to define the `ENGINE=Kafka` table structure with all the column names and types. +This task can be particularly challenging when dealing with complex Avro messages, as manually determining the exact schema for +ClickHouse is both tricky and time-consuming. This complexity is particularly frustrating in the case of Avro formats, +where the column names and their types are already clearly defined in the schema registry. + +Although ClickHouse supports schema inference for files, it does not natively support this for Kafka streams. + +Here’s a workaround to infer the schema using AvroConfluent messages: + +## Step 1: Capture and Store a Raw Kafka Message + +First, create a table in ClickHouse to consume a raw message from Kafka and store it as a file: + +```sql +CREATE TABLE test_kafka (raw String) ENGINE = Kafka +SETTINGS kafka_broker_list = 'localhost:29092', + kafka_topic_list = 'movies-raw', + kafka_format = 'RawBLOB', -- Don't try to parse the message, return it 'as is' + kafka_group_name = 'tmp_test'; -- Using some dummy consumer group here. 
+ +INSERT INTO FUNCTION file('./avro_raw_sample.avro', 'RawBLOB') +SELECT * FROM test_kafka LIMIT 1 +SETTINGS max_block_size=1, stream_like_engine_allow_direct_select=1; + +DROP TABLE test_kafka; +``` + +## Step 2: Infer Schema Using the Stored File +Using the stored raw message, let ClickHouse infer the schema based on the AvroConfluent format and a specified schema registry URL: + +```sql +CREATE TEMPORARY TABLE test AS +SELECT * FROM file('./avro_raw_sample.avro', 'AvroConfluent') +SETTINGS format_avro_schema_registry_url='http://localhost:8085'; + +SHOW CREATE TEMPORARY TABLE test\G; +``` +The output from the `SHOW CREATE` command will display the inferred schema, for example: + +```plaintext +Row 1: +────── +statement: CREATE TEMPORARY TABLE test +( + `movie_id` Int64, + `title` String, + `release_year` Int64 +) +ENGINE = Memory +``` + +## Step 3: Create the Kafka Table with the Inferred Schema +Now, use the inferred schema to create the Kafka table: + +```sql +CREATE TABLE movies_kafka +( + `movie_id` Int64, + `title` String, + `release_year` Int64 +) +ENGINE = Kafka +SETTINGS kafka_broker_list = 'localhost:29092', + kafka_topic_list = 'movies-raw', + kafka_format = 'AvroConfluent', + kafka_group_name = 'movies', + kafka_schema_registry_url = 'http://localhost:8085'; +``` + +This approach reduces manual schema definition efforts and enhances data integration workflows by utilizing the schema inference capabilities of ClickHouse for AvroConfluent messages. + +## Appendix + +**Avro** is a binary serialization format used within Apache Kafka for efficiently serializing data with a compact binary format. It relies on schemas, which define the structure of the serialized data, to ensure robust data compatibility and type safety. + +**Schema Registry** is a service that provides a centralized repository for Avro schemas. 
It helps manage and enforce schemas across applications, ensuring that the data exchanged between producers and consumers adheres to a predefined format, and facilitates schema evolution in a safe manner. + +In ClickHouse, the **Avro** format is used for data that contains the schema embedded directly within the file or message. This means the structure of the data is defined and included with the data itself, allowing for self-describing messages. However, embedding the schema within every message is not optimal for streaming large volumes of data, as it increases the workload and network overhead. Repeatedly passing the same schema with each message can be inefficient, particularly in high-throughput environments. + +On the other hand, the **AvroConfluent** format in ClickHouse is specifically designed to work with the Confluent Schema Registry. This format expects the schema to be managed externally in a schema registry rather than being embedded within each message. It retrieves schema information from the Schema Registry, which allows for centralized schema management and versioning, facilitating easier schema evolution and enforcement across different applications using Kafka. diff --git a/content/en/altinity-kb-integrations/altinity-kb-rabbitmq/_index.md b/content/en/altinity-kb-integrations/altinity-kb-rabbitmq/_index.md new file mode 100644 index 0000000000..93ca8288d0 --- /dev/null +++ b/content/en/altinity-kb-integrations/altinity-kb-rabbitmq/_index.md @@ -0,0 +1,28 @@ +--- +title: "RabbitMQ" +linkTitle: "RabbitMQ" +description: > + RabbitMQ engine in ClickHouse® 24.3+ +--- + +### Settings + +Basic RabbitMQ settings and use cases: https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq + +### Latest improvements/fixes + +##### (v23.10+) + +- **Allow to save unparsed records and errors in RabbitMQ**: +NATS and FileLog engines. 
Add virtual columns `_error` and `_raw_message` (for NATS and RabbitMQ), `_raw_record` (for FileLog) that are filled when ClickHouse fails to parse new record. +The behaviour is controlled under storage settings `nats_handle_error_mode` for NATS, `rabbitmq_handle_error_mode` for RabbitMQ, `handle_error_mode` for FileLog similar to `kafka_handle_error_mode`. +If it's set to `default`, en exception will be thrown when ClickHouse fails to parse a record, if it's set to `stream`, error and raw record will be saved into virtual columns. +Closes [#36035](https://github.com/ClickHouse/ClickHouse/issues/36035) and [#55477](https://github.com/ClickHouse/ClickHouse/pull/55477) + + +##### (v24+) + +- [#45350 RabbitMq Storage Engine should NACK messages if exception is thrown during processing](https://github.com/ClickHouse/ClickHouse/issues/45350) +- [#59775 rabbitmq: fix having neither acked nor nacked messages](https://github.com/ClickHouse/ClickHouse/pull/59775) +- [#60312 Make rabbitmq nack broken messages](https://github.com/ClickHouse/ClickHouse/pull/60312) +- [#61320 Fix logical error in RabbitMQ storage with MATERIALIZED columns](https://github.com/ClickHouse/ClickHouse/pull/61320) diff --git a/content/en/altinity-kb-integrations/altinity-kb-rabbitmq/error-handling.md b/content/en/altinity-kb-integrations/altinity-kb-rabbitmq/error-handling.md new file mode 100644 index 0000000000..4acbb34434 --- /dev/null +++ b/content/en/altinity-kb-integrations/altinity-kb-rabbitmq/error-handling.md @@ -0,0 +1,56 @@ +--- +title: "RabbitMQ Error handling" +linkTitle: "RabbitMQ Error handling" +description: > + Error handling for RabbitMQ table engine +--- + +Same approach as in Kafka but virtual columns are different. 
Check https://clickhouse.com/docs/en/engines/table-engines/integrations/rabbitmq#virtual-columns + +```sql +CREATE TABLE IF NOT EXISTS rabbitmq.broker_errors_queue +( + exchange_name String, + channel_id String, + delivery_tag UInt64, + redelivered UInt8, + message_id String, + timestamp UInt64 +) +engine = RabbitMQ +SETTINGS + rabbitmq_host_port = 'localhost:5672', + rabbitmq_exchange_name = 'exchange-test', -- required parameter even though this is done via the rabbitmq config + rabbitmq_queue_consume = true, + rabbitmq_queue_base = 'test-errors', + rabbitmq_format = 'JSONEachRow', + rabbitmq_username = 'guest', + rabbitmq_password = 'guest', + rabbitmq_handle_error_mode = 'stream'; + +CREATE MATERIALIZED VIEW IF NOT EXISTS rabbitmq.broker_errors_mv +( + exchange_name String, + channel_id String, + delivery_tag UInt64, + redelivered UInt8, + message_id String, + timestamp UInt64 + raw_message String, + error String +) +ENGINE = MergeTree +ORDER BY (error) +SETTINGS index_granularity = 8192 AS +SELECT + _exchange_name AS exchange_name, + _channel_id AS channel_id, + _delivery_tag AS delivery_tag, + _redelivered AS redelivered, + _message_id AS message_id, + _timestamp AS timestamp, + _raw_message AS raw_message, + _error AS error +FROM rabbitmq.broker_errors_queue +WHERE length(_error) > 0 +``` diff --git a/content/en/altinity-kb-integrations/bi-tools.md b/content/en/altinity-kb-integrations/bi-tools.md index 0928c8b263..ee1d2f09ea 100644 --- a/content/en/altinity-kb-integrations/bi-tools.md +++ b/content/en/altinity-kb-integrations/bi-tools.md @@ -7,7 +7,7 @@ description: > * Superset: [https://superset.apache.org/docs/databases/clickhouse](https://superset.apache.org/docs/databases/clickhouse) * Metabase: [https://github.com/enqueue/metabase-clickhouse-driver](https://github.com/enqueue/metabase-clickhouse-driver) * Querybook: 
[https://www.querybook.org/docs/setup_guide/connect_to_query_engines/\#all-query-engines](https://www.querybook.org/docs/setup_guide/connect_to_query_engines/#all-query-engines) -* Tableau: [Clickhouse Tableau connector odbc](https://github.com/Altinity/clickhouse-tableau-connector-odbc) +* Tableau: [Altinity Tableau Connector for ClickHouse®](https://github.com/Altinity/tableau-connector-for-clickhouse) support both JDBC & ODBC drivers * Looker: [https://docs.looker.com/setup-and-management/database-config/clickhouse](https://docs.looker.com/setup-and-management/database-config/clickhouse) * Apache Zeppelin * SeekTable diff --git a/content/en/altinity-kb-integrations/catboost-mindsdb-fast.ai.md b/content/en/altinity-kb-integrations/catboost-mindsdb-fast.ai.md index acecf80f5a..ee41487d1c 100644 --- a/content/en/altinity-kb-integrations/catboost-mindsdb-fast.ai.md +++ b/content/en/altinity-kb-integrations/catboost-mindsdb-fast.ai.md @@ -11,7 +11,7 @@ Article is based on feedback provided by one of Altinity clients. CatBoost: * It uses gradient boosting - a hard to use technique which can outperform neural networks. Gradient boosting is powerful but it's easy to shoot yourself in the foot using it. -* The documentation on how to use it is quite lacking. The only good source of information on how to properly configure a model to yield good results is this video: [https://www.youtube.com/watch?v=usdEWSDisS0](https://www.youtube.com/watch?v=usdEWSDisS0) . We had to dig around GitHub issues to find out how to make it work with ClickHouse. +* The documentation on how to use it is quite lacking. The only good source of information on how to properly configure a model to yield good results is this video: [https://www.youtube.com/watch?v=usdEWSDisS0](https://www.youtube.com/watch?v=usdEWSDisS0) . We had to dig around GitHub issues to find out how to make it work with ClickHouse®. * CatBoost is fast. Other libraries will take ~5X to ~10X as long to do what CatBoost does. 
* CatBoost will do preprocessing out of the box (fills nulls, apply standard scaling, encodes strings as numbers). * CatBoost has all functions you'd need (metrics, plotters, feature importance) diff --git a/content/en/altinity-kb-integrations/clickhouse-odbc.md b/content/en/altinity-kb-integrations/clickhouse-odbc.md index eec3a2e5c6..d8c66cf95f 100644 --- a/content/en/altinity-kb-integrations/clickhouse-odbc.md +++ b/content/en/altinity-kb-integrations/clickhouse-odbc.md @@ -1,16 +1,14 @@ --- -title: "ODBC Driver for ClickHouse" -linkTitle: "ODBC Driver for ClickHouse" +title: "ODBC Driver for ClickHouse®" +linkTitle: "ODBC Driver for ClickHouse®" weight: 100 description: >- - ODBC Driver for ClickHouse + ODBC Driver for ClickHouse® --- -# ODBC Driver for ClickHouse. +[ODBC](https://docs.microsoft.com/en-us/sql/odbc/reference/odbc-overview) interface for ClickHouse® RDBMS. -[ODBC](https://docs.microsoft.com/en-us/sql/odbc/reference/odbc-overview) interface for [ClickHouse](https://clickhouse.yandex) RDBMS. - -Licensed under the [Apache 2.0](LICENSE). +Licensed under the [Apache 2.0](https://github.com/ClickHouse/clickhouse-odbc?tab=Apache-2.0-1-ov-file#readme). ## Installation and usage @@ -21,7 +19,7 @@ Licensed under the [Apache 2.0](LICENSE). 3. Configure ClickHouse DSN. Note: that install driver linked against MDAC (which is default for Windows), some non-windows native -applications (cygwin / msys64 based) may require driver linked agains unixodbc. Build section below. +applications (cygwin / msys64 based) may require driver linked against unixodbc. Build section below. ### MacOS @@ -30,7 +28,7 @@ applications (cygwin / msys64 based) may require driver linked agains unixodbc. ```bash brew install https://raw.githubusercontent.com/proller/homebrew-core/chodbc/Formula/clickhouse-odbc.rb ``` -3. Add clickhouse DSN configuration into ~/.odbc.ini file. ([sample]()) +3. Add ClickHouse DSN configuration into ~/.odbc.ini file. 
([sample]()) Note: that install driver linked against iodbc (which is default for Mac), some homebrew applications (like python) may require unixodbc driver to work properly. In that case see Build section below. @@ -38,7 +36,7 @@ Note: that install driver linked against iodbc (which is default for Mac), some ### Linux 1. DEB/RPM packaging is not provided yet, please build & install the driver from sources. -2. Add clickhouse DSN configuration into ~/.odbc.ini file. ([sample]()) +2. Add ClickHouse DSN configuration into ~/.odbc.ini file. ([sample]()) ## Configuration @@ -49,29 +47,29 @@ On Windows you can create/edit DSN using GUI tool through Control Panel. The list of DSN parameters recognized by the driver is as follows: -| Parameter | Default value | Description | -| :-----------------: | :----------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `Url` | empty | URL that points to a running ClickHouse instance, may include username, password, port, database, etc. 
| -| `Proto` | deduced from `Url`, or from `Port` and `SSLMode`: `https` if `443` or `8443` or `SSLMode` is not empty, `http` otherwise | Protocol, one of: `http`, `https` | -| `Server` or `Host` | deduced from `Url` | IP or hostname of a server with a running ClickHouse instance on it | -| `Port` | deduced from `Url`, or from `Proto`: `8443` if `https`, `8123` otherwise | Port on which the ClickHouse instance is listening | -| `Path` | `/query` | Path portion of the URL | -| `UID` or `Username` | `default` | User name | -| `PWD` or `Password` | empty | Password | -| `Database` | `default` | Database name to connect to | -| `Timeout` | `30` | Connection timeout | -| `SSLMode` | empty | Certificate verification method (used by TLS/SSL connections, ignored in Windows), one of: `allow`, `prefer`, `require`, use `allow` to enable [`SSL_VERIFY_PEER`](https://www.openssl.org/docs/manmaster/man3/SSL_CTX_set_verify.html) TLS/SSL certificate verification mode, [`SSL_VERIFY_PEER \| SSL_VERIFY_FAIL_IF_NO_PEER_CERT`](https://www.openssl.org/docs/manmaster/man3/SSL_CTX_set_verify.html) is used otherwise | -| `PrivateKeyFile` | empty | Path to private key file (used by TLS/SSL connections), can be empty if no private key file is used | -| `CertificateFile` | empty | Path to certificate file (used by TLS/SSL connections, ignored in Windows), if the private key and the certificate are stored in the same file, this can be empty if `PrivateKeyFile` is specified | -| `CALocation` | empty | Path to the file or directory containing the CA/root certificates (used by TLS/SSL connections, ignored in Windows) | -| `DriverLog` | `on` if `CMAKE_BUILD_TYPE` is `Debug`, `off` otherwise | Enable or disable the extended driver logging | -| `DriverLogFile` | `\temp\clickhouse-odbc-driver.log` on Windows, `/tmp/clickhouse-odbc-driver.log` otherwise | Path to the extended driver log file (used when `DriverLog` is `on`) | +| Parameter | Default value | Description | +| :-----------------: | 
:----------------------------------------------------------------------------------------------------------------------: |:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Url` | empty | URL that points to a running ClickHouse instance, may include username, password, port, database, etc. | +| `Proto` | deduced from `Url`, or from `Port` and `SSLMode`: `https` if `443` or `8443` or `SSLMode` is not empty, `http` otherwise | Protocol, one of: `http`, `https` | +| `Server` or `Host` | deduced from `Url` | IP or hostname of a server with a running ClickHouse instance on it | +| `Port` | deduced from `Url`, or from `Proto`: `8443` if `https`, `8123` otherwise | Port on which the ClickHouse instance is listening | +| `Path` | `/query` | Path portion of the URL | +| `UID` or `Username` | `default` | User name | +| `PWD` or `Password` | empty | Password | +| `Database` | `default` | Database name to connect to | +| `Timeout` | `30` | Connection timeout | +| `SSLMode` | empty | Certificate verification method (used by TLS/SSL connections, ignored in Windows), one of: `allow`, `prefer`, `require`, use `allow` to enable [SSL_VERIFY_PEER](https://www.openssl.org/docs/manmaster/man3/SSL_CTX_set_verify.html) TLS/SSL certificate verification mode, [SSL_VERIFY_PEER \| SSL_VERIFY_FAIL_IF_NO_PEER_CERT](https://www.openssl.org/docs/manmaster/man3/SSL_CTX_set_verify.html) is used otherwise | +| `PrivateKeyFile` | empty | Path to private key file (used by TLS/SSL connections), can be empty if no private key file is used | +| `CertificateFile` | empty | Path to certificate file (used by TLS/SSL connections, ignored in 
Windows), if the private key and the certificate are stored in the same file, this can be empty if `PrivateKeyFile` is specified | +| `CALocation` | empty | Path to the file or directory containing the CA/root certificates (used by TLS/SSL connections, ignored in Windows) | +| `DriverLog` | `on` if `CMAKE_BUILD_TYPE` is `Debug`, `off` otherwise | Enable or disable the extended driver logging | +| `DriverLogFile` | `\temp\clickhouse-odbc-driver.log` on Windows, `/tmp/clickhouse-odbc-driver.log` otherwise | Path to the extended driver log file (used when `DriverLog` is `on`) | ## Troubleshooting & bug reporting -If some software doesn't work properly with that driver, but works good with other drivers - we will be appritiate if you will be able to collect debug info. +If some software doesn't work properly with that driver, but works good with other drivers - we will be appropriate if you will be able to collect debug info. To debug issues with the driver, first things that need to be done are: - enabling driver manager tracing. Links may contain some irrelevant vendor-specific details. @@ -142,7 +140,7 @@ brew install git cmake make poco openssl libiodbc # You may use unixodbc INSTEAD **Note:** usually on Linux you use unixODBC driver manager, and on Mac - iODBC. In some (rare) cases you may need use other driver manager, please do it only -if you clearly understand the differencies. Driver should be used with the driver +if you clearly understand the differences. Driver should be used with the driver manager it was linked to. 
Clone the repo with submodules: diff --git a/content/en/altinity-kb-integrations/mysql-clickhouse.md b/content/en/altinity-kb-integrations/mysql-clickhouse.md index 73166c6fb4..34a7d4b9c5 100644 --- a/content/en/altinity-kb-integrations/mysql-clickhouse.md +++ b/content/en/altinity-kb-integrations/mysql-clickhouse.md @@ -1,28 +1,36 @@ --- title: "MySQL" -linkTitle: "Integration Clickhouse with MySQL" +linkTitle: "Integrating ClickHouse® with MySQL" weight: 100 -description: >- - Integration Clickhouse with MySQL --- ### Replication using MaterializeMySQL. -- https://clickhouse.tech/docs/en/engines/database-engines/materialized-mysql/ +- https://clickhouse.com/docs/en/engines/database-engines/materialized-mysql - https://translate.google.com/translate?sl=auto&tl=en&u=https://www.jianshu.com/p/d0d4306411b3 - https://raw.githubusercontent.com/ClickHouse/clickhouse-presentations/master/meetup47/materialize_mysql.pdf -It reads mysql binlog directly and transform queries into something which clickhouse can support. Supports updates and deletes (under the hood implemented via something like ReplacingMergeTree with enforced FINAL and 'deleted' flag). Status is 'experimental', there are quite a lot of known limitations and issues, but some people use it. The original author of that went to another project, and the main team don't have a lot of resource to improve that for now (more important thing in the backlog) +It reads mysql binlog directly and transform queries into something which ClickHouse® can support. Supports updates and deletes (under the hood implemented via something like ReplacingMergeTree with enforced FINAL and 'deleted' flag). Status is 'experimental', there are quite a lot of known limitations and issues, but some people use it. The original author of that went to another project, and the main team don't have a lot of resource to improve that for now (more important thing in the backlog) The replication happens on the mysql database level. 
-### Replication using debezium + Kafka +### Replication using debezium + Kafka (+ Altinity Sink Connector for ClickHouse) -Debezium can read the binlog and transform it to Kafka messages. You can later capture the stream of message on ClickHouse side and process it as you like. -Please remeber that currently Kafka engine supports only at-least-once delivery guarantees. +Debezium can read the binlog and transform it to Kafka messages. +You can later capture the stream of message on ClickHouse side and process it as you like. +Please remember that currently Kafka engine supports only at-least-once delivery guarantees. It's used by several companies, quite nice & flexible. But initial setup may require some efforts. +#### Altinity Sink Connector for ClickHouse + +Can handle transformation of debezium messages (with support for DELETEs and UPDATEs) and exactly-once delivery for you. + +Links: +* https://altinity.com/blog/fast-mysql-to-clickhouse-replication-announcing-the-altinity-sink-connector-for-clickhouse +* https://altinity.com/mysql-to-clickhouse/ +* https://github.com/Altinity/clickhouse-sink-connector + #### Same as above but using https://maxwells-daemon.io/ instead of debezium. Have no experience / feedback there, but should be very similar to debezium. @@ -32,16 +40,16 @@ Have no experience / feedback there, but should be very similar to debezium. See https://altinity.com/blog/2018/6/30/realtime-mysql-clickhouse-replication-in-practice That was done long time ago in altinity for one use-case, and it seem like it was never used outside of that. -It's a python application with lot of switches which can copy a schema or read binlog from mysql and put it to clickhouse. +It's a python application with lot of switches which can copy a schema or read binlog from mysql and put it to ClickHouse. Not supported currently. But it's just a python, so maybe can be adjusted to different needs. -### Accessing MySQL data via integration engines from inside clickhouse. 
+### Accessing MySQL data via integration engines from inside ClickHouse. -MySQL [table engine](https://clickhouse.com/docs/en/engines/table-engines/integrations/mysql/) / [table function](https://clickhouse.com/docs/en/sql-reference/table-functions/mysql/), or [MySQL database engine](https://clickhouse.com/docs/en/engines/database-engines/mysql/) - clickhouse just connects to mysql server as a client, and can do normal selects. +MySQL [table engine](https://clickhouse.com/docs/en/engines/table-engines/integrations/mysql/) / [table function](https://clickhouse.com/docs/en/sql-reference/table-functions/mysql/), or [MySQL database engine](https://clickhouse.com/docs/en/engines/database-engines/mysql/) - ClickHouse just connects to mysql server as a client, and can do normal selects. We had webinar about that a year ago: https://www.youtube.com/watch?v=44kO3UzIDLI -Using that you can easily create some ETL script which will copy the data from mysql to clickhouse regularly, i.e. something like +Using that you can easily create some ETL script which will copy the data from mysql to ClickHouse regularly, i.e. something like ```sql INSERT INTO clickhouse_table SELECT * FROM mysql_table WHERE id > ... @@ -49,7 +57,7 @@ INSERT INTO clickhouse_table SELECT * FROM mysql_table WHERE id > ... Works great if you have append only table in MySQL. 
-In newer clickhouse versions you can query this was also sharded / replicated MySQL cluster - see [ExternalDistributed](https://clickhouse.com/docs/en/engines/table-engines/integrations/ExternalDistributed/) +In newer ClickHouse versions you can query this was also sharded / replicated MySQL cluster - see [ExternalDistributed](https://clickhouse.com/docs/en/engines/table-engines/integrations/ExternalDistributed/) ### MySQL dictionaries diff --git a/content/en/altinity-kb-interfaces/_index.md b/content/en/altinity-kb-interfaces/_index.md index 5fb1e32fa8..e8b5b644f7 100644 --- a/content/en/altinity-kb-interfaces/_index.md +++ b/content/en/altinity-kb-interfaces/_index.md @@ -4,6 +4,6 @@ linkTitle: "Interfaces" keywords: - clickhouse interface description: > - See the frequent questions users have about clickhouse-client. + Frequent questions users have about `clickhouse-client` weight: 9 --- diff --git a/content/en/altinity-kb-interfaces/altinity-kb-clickhouse-client.md b/content/en/altinity-kb-interfaces/altinity-kb-clickhouse-client.md index 4ffaba09a4..4346abd242 100644 --- a/content/en/altinity-kb-interfaces/altinity-kb-clickhouse-client.md +++ b/content/en/altinity-kb-interfaces/altinity-kb-clickhouse-client.md @@ -4,7 +4,7 @@ linkTitle: "clickhouse-client" keywords: - clickhouse client description: > - ClickHouse client + ClickHouse® client --- Q. How can I input multi-line SQL code? can you guys give me an example? 
@@ -50,4 +50,4 @@ Also, it’s possible to have several client config files and pass one of them t References: -* [https://clickhouse.tech/docs/en/interfaces/cli/](https://clickhouse.tech/docs/en/interfaces/cli/) +* [https://clickhouse.com/docs/en/interfaces/cli](https://clickhouse.com/docs/en/interfaces/cli) diff --git a/content/en/altinity-kb-kubernetes/_index.md b/content/en/altinity-kb-kubernetes/_index.md index d569a64ee2..09ca08dc0d 100644 --- a/content/en/altinity-kb-kubernetes/_index.md +++ b/content/en/altinity-kb-kubernetes/_index.md @@ -1,13 +1,565 @@ --- -title: "Kubernetes" -linkTitle: "Kubernetes" +title: "Using the Altinity Kubernetes Operator for ClickHouse®" +linkTitle: "Using the Altinity Kubernetes Operator for ClickHouse®" keywords: - clickhouse in kubernetes - kubernetes issues +- ALtinity Kubernetes operator for ClickHouse description: > - Run ClickHouse in Kubernetes without any issues. + Run ClickHouse® in Kubernetes without any issues. weight: 8 +aliases: + /altinity-kb-kubernetes/altinity-kb-possible-issues-with-running-clickhouse-in-k8s/ --- -## clickhouse-backup +## Useful links + +The Altinity Kubernetes Operator for ClickHouse® repo has very useful documentation: + +- [Quick Start Guide](https://github.com/Altinity/clickhouse-operator/blob/master/docs/quick_start.md) +- [Operator Custom Resource Definition explained](https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md) +- [Examples - YAML files to deploy the operator in many common configurations](https://github.com/Altinity/clickhouse-operator/tree/master/docs/chi-examples) +- [Main documentation](https://github.com/Altinity/clickhouse-operator/tree/master/docs#table-of-contents) + +## ClickHouse Operator ip filter + +- In the current version of operator default user is limited to IP addresses of the cluster pods. 
We plan to have a password option for 0.20.0 and use a 'secret' authentication for distributed queries + +## Start/Stop cluster + +- Don't delete the operator using: + +```bash +kubectl delete -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-bundle.yaml +``` + +- kubectl delete chi cluster-name # chi is the name of the CRD clickhouseInstallation + +## DELETE PVCs + +https://altinity.com/blog/preventing-clickhouse-storage-deletion-with-the-altinity-kubernetes-operator-reclaimpolicy + +## Scaling + +Best way is to scale down the deployments to 0 replicas, after that reboot the node and scale up again: + +1. first check that all your PVCs have the retain policy: + +```bash +kubectl get pv -o=custom-columns=PV:.metadata.name,NAME:.spec.claimRef.name,POLICY:.spec.persistentVolumeReclaimPolicy +# Patch it if you need +kubectl patch pv -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' +``` + +```yaml +spec: + templates: + volumeClaimTemplates: + - name: XXX + reclaimPolicy: Retain +``` + +2. After that just create a stop.yaml and `kubectl apply -f stop.yaml` + +```yaml +kind: ClickHouseInstallation +spec: + stop: yes +``` + +3. Reboot kubernetes node +4. 
Scale up deployment changing the stop property to no and do an `kubectl apply -f stop.yml` + +```yaml +kind: ClickHouseInstallation +spec: + stop: no +``` + +## Check where pods are executing + +```bash +kubectl get pod -o=custom-columns=NAME:.metadata.name,STATUS:.status.phase,NODE:.spec.nodeName -n zk +# Check which hosts in which AZs +kubectl get node -o=custom-columns=NODE:.metadata.name,ZONE:.metadata.labels.'failure-domain\.beta\.kubernetes\.io/zone' +``` + +## Check node instance types: + +```sql +kubectl get nodes -o json|jq -Cjr '.items[] | .metadata.name," ",.metadata.labels."beta.kubernetes.io/instance-type"," ",.metadata.labels."beta.kubernetes.io/arch", "\n"'|sort -k3 -r + +ip-10-3-9-2.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-9-236.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-9-190.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-9-138.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-9-110.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-8-39.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-8-219.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-8-189.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-13-40.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-12-248.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-12-216.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-12-170.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-11-229.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-11-188.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-11-175.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-10-218.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-10-160.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-10-145.eu-central-1.compute.internal t4g.large arm64 +ip-10-3-9-57.eu-central-1.compute.internal m5.large amd64 +ip-10-3-8-146.eu-central-1.compute.internal m5.large amd64 +ip-10-3-13-1.eu-central-1.compute.internal m5.xlarge amd64 
+ip-10-3-11-52.eu-central-1.compute.internal m5.xlarge amd64 +ip-10-3-11-187.eu-central-1.compute.internal m5.xlarge amd64 +ip-10-3-10-217.eu-central-1.compute.internal m5.xlarge amd64 +``` + +## Search for missing affinity rules: + +```bash +kubectl get pods -o json -n zk |\ +jq -r "[.items[] | {name: .metadata.name,\ + affinity: .spec.affinity}]" +[ + { + "name": "zookeeper-0", + "affinity": null + }, + . . . +] +``` + +## Storage classes + +```bash +kubectl get pvc -o=custom-columns=NAME:.metadata.name,SIZE:.spec.resources.requests.storage,CLASS:.spec.storageClassName,VOLUME:.spec.volumeName +... +NAME SIZE CLASS VOLUME +datadir-volume-zookeeper-0 25Gi gp2 pvc-9a3...9ee + +kubectl get storageclass/gp2 +... +NAME PROVISIONER RECLAIMPOLICY... +gp2 (default) ebs.csi.aws.com Delete +``` + +## Using CSI driver to protect storage: + +```yaml +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: gp2-protected +parameters: + encrypted: "true" + type: gp2 +provisioner: ebs.csi.aws.com +reclaimPolicy: Retain +volumeBindingMode: WaitForFirstConsumer +``` + +## Enable Resize of Volumes + +Operator does not delete volumes, so those were probably deleted by Kubernetes. In some new versions there is a feature flag that deletes PVCs attached to STS when STS is deleted. + +Please try do the following: Use operator 0.20.3. Add the following to the defaults: +`` + +```yaml + defaults: + storageManagement: + provisioner: Operator +``` + +That enables storage management by operator, instead of STS. 
It allows to extend volumes without re-creating STS, and us increase Volume size without restart of clickhouse statefulset pods for CSI drivers which support `allowVolumeExpansion` in storage classes because statefulset template don't change and we don't need delete/create statefulset + +## Change server settings: + +https://github.com/Altinity/clickhouse-operator/issues/828 + +```yaml +kind: ClickHouseInstallation +spec: + configuration: + settings: + max_concurrent_queries: 150 +``` + +Or **edit ClickHouseInstallation:** + +```bash +kubectl -n get chi + +NAME CLUSTERS HOSTS STATUS HOSTS-COMPLETED AGE +dnieto-test 1 4 Completed 211d +mbak-test 1 1 Completed 44d +rory-backupmar8 1 4 Completed 42h + +kubectl -n edit ClickHouseInstallation dnieto-test +``` + +## Clickhouse-backup for CHOP + +Examples for use clickhouse-backup + clickhouse-operator for EKS cluster which not managed by `altinity.cloud` + +Main idea: second container in clickhouse pod +  CronJob which will insert and poll `system.backup_actions` commands to execute clickhouse-backup commands + +https://github.com/AlexAkulov/clickhouse-backup/blob/master/Examples.md#how-to-use-clickhouse-backup-in-kubernetes + +## Configurations: + +How to modify yaml configs: + +https://github.com/Altinity/clickhouse-operator/blob/dc6cdc6f2f61fc333248bb78a8f8efe792d14ca2/tests/e2e/manifests/chi/test-016-settings-04.yaml#L26 + +## clickhouse-operator install Example: + +use latest release if possible +https://github.com/Altinity/clickhouse-operator/releases + +- No. 
Nodes/replicas: 2 to 3 nodes with 500GB per node minimum +- Zookeeper: 3 node ensemble +- Type of instances: m6i.x4large to start with and you can go up to m6i.16xlarge +- Persistent Storage/volumes: EBS gp2 for data and logs and gp3 for zookeeper + +### Install operator in namespace + +```bash +#!/bin/bash + +# Namespace to install operator into +OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-dnieto-test-chop}" +# Namespace to install metrics-exporter into +METRICS_EXPORTER_NAMESPACE="${OPERATOR_NAMESPACE}" +# Operator's docker image +OPERATOR_IMAGE="${OPERATOR_IMAGE:-altinity/clickhouse-operator:latest}" +# Metrics exporter's docker image +METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE:-altinity/metrics-exporter:latest}" + +# Setup clickhouse-operator into specified namespace +kubectl apply --namespace="${OPERATOR_NAMESPACE}" -f <( \ + curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-template.yaml | \ + OPERATOR_IMAGE="${OPERATOR_IMAGE}" \ + OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE}" \ + METRICS_EXPORTER_IMAGE="${METRICS_EXPORTER_IMAGE}" \ + METRICS_EXPORTER_NAMESPACE="${METRICS_EXPORTER_NAMESPACE}" \ + envsubst \ +) +``` + +### Install zookeeper ensemble + +zookeepers will be named like zookeeper-0.zoons + +```bash +> kubectl create ns zoo3ns +> kubectl -n zoo3ns apply -f https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/zookeeper/quick-start-persistent-volume/zookeeper-3-nodes-1GB-for-tests-only.yaml + +# check names they should be like: +# zookeeper.zoo3ns if using a new namespace +# If using the same namespace zookeeper. 
+# zookeeper must be accessed using the service like service_name.namespace +``` + +### Deploy test cluster + +```bash +> kubectl -n dnieto-test-chop apply -f dnieto-test-chop.yaml +``` + +```yaml +# dnieto-test-chop.yaml +apiVersion: "clickhouse.altinity.com/v1" +kind: "ClickHouseInstallation" +metadata: + name: "dnieto-dev" +spec: + configuration: + settings: + max_concurrent_queries: "200" + merge_tree/ttl_only_drop_parts: "1" + profiles: + default/queue_max_wait_ms: "10000" + readonly/readonly: "1" + users: + admin/networks/ip: + - 0.0.0.0/0 + - '::/0' + admin/password_sha256_hex: "" + admin/profile: default + admin/access_management: 1 + zookeeper: + nodes: + - host: zookeeper.dnieto-test-chop + port: 2181 + clusters: + - name: dnieto-dev + templates: + podTemplate: pod-template-with-volumes + serviceTemplate: chi-service-template + layout: + shardsCount: 1 + # put the number of desired nodes 3 by default + replicasCount: 2 + templates: + podTemplates: + - name: pod-template-with-volumes + spec: + containers: + - name: clickhouse + image: clickhouse/clickhouse-server:22.3 + # separate data from logs + volumeMounts: + - name: data-storage-vc-template + mountPath: /var/lib/clickhouse + - name: log-storage-vc-template + mountPath: /var/log/clickhouse-server + serviceTemplates: + - name: chi-service-template + generateName: "service-{chi}" + # type ObjectMeta struct from k8s.io/meta/v1 + metadata: + annotations: + # https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + # this tags for elb load balancer + #service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp + #service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + #https://kubernetes.io/docs/concepts/services-networking/service/#aws-nlb-support + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + service.beta.kubernetes.io/aws-load-balancer-type: nlb + spec: + ports: + - name: http + port: 8123 + - name: client + port: 
9000 + type: LoadBalancer + volumeClaimTemplates: + - name: data-storage-vc-template + spec: + # no storageClassName - means use default storageClassName + # storageClassName: default + # here if you have a storageClassName defined for gp2 you can use it. + # kubectl get storageclass + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + reclaimPolicy: Retain + - name: log-storage-vc-template + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi +``` + +### Install monitoring: + +In order to setup prometheus as a backend for all the asynchronous_metric_log / metric_log tables and also set up grafana dashboards: + +- https://github.com/Altinity/clickhouse-operator/blob/master/docs/prometheus_setup.md +- https://github.com/Altinity/clickhouse-operator/blob/master/docs/grafana_setup.md +- [clickhouse-operator/monitoring_setup.md at master · Altinity/clickhouse-operator](https://github.com/Altinity/clickhouse-operator/blob/master/docs/monitoring_setup.md) + +## Extra configs + +There is an admin user by default in the deployment that is used to admin stuff + +## KUBECTL chi basic comands: + +```bash +*> kubectl get crd* + +NAME CREATED AT +clickhouseinstallations.clickhouse.altinity.com 2021-10-11T13:46:43Z +clickhouseinstallationtemplates.clickhouse.altinity.com 2021-10-11T13:46:44Z +clickhouseoperatorconfigurations.clickhouse.altinity.com 2021-10-11T13:46:44Z +eniconfigs.crd.k8s.amazonaws.com 2021-10-11T13:41:23Z +grafanadashboards.integreatly.org 2021-10-11T13:54:37Z +grafanadatasources.integreatly.org 2021-10-11T13:54:38Z +grafananotificationchannels.integreatly.org 2022-05-17T14:27:48Z +grafanas.integreatly.org 2021-10-11T13:54:37Z +provisioners.karpenter.sh 2022-05-17T14:27:49Z +securitygrouppolicies.vpcresources.k8s.aws 2021-10-11T13:41:27Z +volumesnapshotclasses.snapshot.storage.k8s.io 2022-04-22T13:34:20Z +volumesnapshotcontents.snapshot.storage.k8s.io 2022-04-22T13:34:20Z +volumesnapshots.snapshot.storage.k8s.io 
2022-04-22T13:34:20Z + +> *kubectl -n test-clickhouse-operator-dnieto2 get chi* +NAME CLUSTERS HOSTS STATUS HOSTS-COMPLETED AGE +simple-01 70m + +> *kubectl -n test-clickhouse-operator-dnieto2 describe chi simple-01* +Name: simple-01 +Namespace: test-clickhouse-operator-dnieto2 +Labels: +Annotations: +API Version: clickhouse.altinity.com/v1 +Kind: ClickHouseInstallation +Metadata: + Creation Timestamp: 2023-01-09T20:38:06Z + Generation: 1 + Managed Fields: + API Version: clickhouse.altinity.com/v1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:configuration: + .: + f:clusters: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2023-01-09T20:38:06Z + Resource Version: 267483138 + UID: d7018efa-2b13-42fd-b1c5-b798fc6d0098 +Spec: + Configuration: + Clusters: + Name: simple +Events: + +> *kubectl get chi --all-namespaces* + +NAMESPACE NAME CLUSTERS HOSTS STATUS HOSTS-COMPLETED AGE +andrey-dev source 1 1 Completed 38d +eu chi-dnieto-test-common-configd 1 1 Completed 161d +eu dnieto-test 1 4 Completed 151d +laszlo-dev node-rescale-2 1 4 Completed 5d13h +laszlo-dev single 1 1 Completed 5d13h +laszlo-dev2 zk2 1 1 Completed 52d +test-clickhouse-operator-dnieto2 simple-01 + +> *kubectl -n test-clickhouse-operator-dnieto2 edit clickhouseinstallations.clickhouse.altinity.com simple-01 + +# Troubleshoot operator stuff +> kubectl -n test-clickhouse-operator-ns edit chi +> kubectl -n test-clickhouse-operator describe chi +> kubectl -n test-clickhouse-operator get chi -o yaml + +# Check operator logs usually located in kube-system or specific namespace +> kubectl -n test-ns logs chi-operator-pod -f + +# Check output to yaml +> kubectl -n test-ns get services -o yaml* +``` + +## Problem with DELETE finalizers: + +https://github.com/Altinity/clickhouse-operator/issues/830 + +There's a problem with stuck finalizers that can cause old CHI installations to hang. 
The sequence of operations looks like this. + +1. You delete the existing ClickHouse operator using `kubectl delete -f operator-installation.yaml` with running CHI clusters. +2. You then drop the namespace where the CHI clusters are running, e.g., `kubectl delete ns my-namespace` +3. This hangs. You run `kubectl get ns my-namespace -o yaml` and you'll see a message like the following: "message: 'Some content in the namespace has finalizers remaining: [finalizer.clickhouseinstallation.altinity.com](http://finalizer.clickhouseinstallation.altinity.com/)" + +That means the CHI can't delete because its finalizer was deleted out from under it. + +The fix is to figure out the chi name which should still be visible and edit it to remove the finalizer reference. + +1. `kubectl -n my-namespace get chi` +2. `kubectl -n my-namespace edit [clickhouseinstallations.clickhouse.altinity.com](http://clickhouseinstallations.clickhouse.altinity.com/) my-clickhouse-cluster` + +Remove the finalizer from the spec, save it, and everything will delete properly. + +**`TIP: if you delete the ns too and there is no ns just create it and apply the above method`** + +## Karpenter scaler + +```sql +> kubectl -n karpenter get all +NAME READY STATUS RESTARTS AGE +pod/karpenter-75c8b7667b-vbmj4 1/1 Running 0 16d +pod/karpenter-75c8b7667b-wszxt 1/1 Running 0 16d + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/karpenter ClusterIP 172.20.129.188 8080/TCP,443/TCP 16d + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/karpenter 2/2 2 2 16d + +NAME DESIRED CURRENT READY AGE +replicaset.apps/karpenter-75c8b7667b 2 2 2 16d + +> kubectl -n karpenter logs pod/karpenter-75c8b7667b-vbmj4 + +2023-02-06T06:33:44.269Z DEBUG Successfully created the logger. 
+2023-02-06T06:33:44.269Z DEBUG Logging level set to: debug +{"level":"info","ts":1675665224.2755454,"logger":"fallback","caller":"injection/injection.go:63","msg":"Starting informers..."} +2023-02-06T06:33:44.376Z DEBUG controller waiting for configmaps {"commit": "f60dacd", "configmaps": ["karpenter-global-settings"]} +2023-02-06T06:33:44.881Z DEBUG controller karpenter-global-settings config "karpenter-global-settings" config was added or updated: settings.Settings{BatchMaxDuration:v1.Duration{Duration:10000000000}, BatchIdleDuration:v1.Duration{Duration:1000000000}} {"commit": "f60dacd"} +2023-02-06T06:33:44.881Z DEBUG controller karpenter-global-settings config "karpenter-global-settings" config was added or updated: settings.Settings{ClusterName:"eu", ClusterEndpoint:"https://79974769E264251E43B18AF4CA31CE8C.gr7.eu-central-1.eks.amazonaws.com", DefaultInstanceProfile:"KarpenterNodeInstanceProfile-eu", EnablePodENI:false, EnableENILimitedPodDensity:true, IsolatedVPC:false, NodeNameConvention:"ip-name", VMMemoryOverheadPercent:0.075, InterruptionQueueName:"Karpenter-eu", Tags:map[string]string{}} {"commit": "f60dacd"} +2023-02-06T06:33:45.001Z DEBUG controller.aws discovered region {"commit": "f60dacd", "region": "eu-central-1"} +2023-02-06T06:33:45.003Z DEBUG controller.aws unable to detect the IP of the kube-dns service, services "kube-dns" is forbidden: User "system:serviceaccount:karpenter:karpenter" cannot get resource "services" in API group "" in the namespace "kube-system" {"commit": "f60dacd"} +2023/02/06 06:33:45 Registering 2 clients +2023/02/06 06:33:45 Registering 2 informer factories +2023/02/06 06:33:45 Registering 3 informers +2023/02/06 06:33:45 Registering 6 controllers +2023-02-06T06:33:45.080Z DEBUG controller.aws discovered version {"commit": "f60dacd", "version": "v0.20.0"} +2023-02-06T06:33:45.082Z INFO controller Starting server {"commit": "f60dacd", "path": "/metrics", "kind": "metrics", "addr": "[::]:8080"} +2023-02-06T06:33:45.082Z 
INFO controller Starting server {"commit": "f60dacd", "kind": "health probe", "addr": "[::]:8081"} +I0206 06:33:45.182600 1 leaderelection.go:248] attempting to acquire leader lease karpenter/karpenter-leader-election... +2023-02-06T06:33:45.226Z INFO controller Starting informers... {"commit": "f60dacd"} +2023-02-06T06:33:45.417Z INFO controller.aws.pricing updated spot pricing with instance types and offerings {"commit": "f60dacd", "instance-type-count": 607, "offering-count": 1400} +2023-02-06T06:33:47.670Z INFO controller.aws.pricing updated on-demand pricing {"commit": "f60dacd", "instance-type-count": 505} +``` + +## Operator Affinities: + +![Screenshot from 2023-02-21 11-26-36.png](https://s3-us-west-2.amazonaws.com/secure.notion-static.com/90052686-7c87-413f-95f7-41c12d233190/Screenshot_from_2023-02-21_11-26-36.png) + +## Deploy operator with clickhouse-keeper + +https://github.com/Altinity/clickhouse-operator/issues/959 [setup-example.yaml](https://github.com/Altinity/clickhouse-operator/blob/eb3fc4e28514d0d6ea25a40698205b02949bcf9d/docs/chi-examples/03-persistent-volume-07-do-not-chown.yaml) + +## Possible issues with running ClickHouse in K8s + +The biggest problem with running ClickHouse® in K8s, happens when clickhouse-server can't start for some reason and pod is falling in CrashloopBackOff, so you can't easily get in the pod and check/fix/restart ClickHouse. + +There is multiple possible reasons for this, some of them can be fixed without manual intervention in pod: + +1. Wrong configuration files Fix: Check templates which are being used for config file generation and fix them. +2. While upgrade some backward incompatible changes prevents ClickHouse from start. Fix: Downgrade and check backward incompatible changes for all versions in between. + +Next reasons would require to have manual intervention in pod/volume. +There is two ways, how you can get access to data: + +1. 
Change entry point of ClickHouse pod to something else, so pod wouldn’t be terminated due ClickHouse error. +2. Attach ClickHouse data volume to some generic pod (like Ubuntu). +3. Unclear restart which produced broken files and/or state on disk is differs too much from state in zookeeper for replicated tables. Fix: Create `force_restore_data` flag. +4. Wrong file permission for ClickHouse files in pod. Fix: Use chown to set right ownership for files and directories. +5. Errors in ClickHouse table schema prevents ClickHouse from start. Fix: Rename problematic `table.sql` scripts to `table.sql.bak` +6. Occasional failure of distributed queries because of wrong user/password. Due nature of k8s with dynamic ip allocations, it's possible that ClickHouse would cache wrong ip-> hostname combination and disallow connections because of mismatched hostname. Fix: run `SYSTEM DROP DNS CACHE;` `1` in config.xml. + +Caveats: + +1. Not all configuration/state folders are being covered by persistent volumes. ([geobases](https://clickhouse.tech/docs/en/sql-reference/functions/ym-dict-functions/#multiple-geobases)) +2. Page cache belongs to k8s node and pv are being mounted to pod, in case of fast shutdown there is possibility to loss some data(needs to be clarified) +3. Some cloud providers (GKE) can have slow unlink command, which is important for ClickHouse because it's needed for parts management. (`max_part_removal_threads` setting) + +Useful commands: + +```bash +kubectl logs chi-chcluster-2-1-0 -c clickhouse-pod -n chcluster --previous +kubectl describe pod chi-chcluster-2-1-0 -n chcluster +``` + +Q. ClickHouse is caching the Kafka pod's IP and trying to connect to the same ip even when there is a new Kafka pod running and the old one is deprecated. Is there some setting where we could refresh the connection + +`1` in config.xml + +### ClickHouse init process failed + +It's due to low value for env `CLICKHOUSE_INIT_TIMEOUT` value. Consider increasing it up to 1 min. 
+[https://github.com/ClickHouse/ClickHouse/blob/9f5cd35a6963cc556a51218b46b0754dcac7306a/docker/server/entrypoint.sh\#L120](https://github.com/ClickHouse/ClickHouse/blob/9f5cd35a6963cc556a51218b46b0754dcac7306a/docker/server/entrypoint.sh#L120) diff --git a/content/en/altinity-kb-kubernetes/altinity-kb-istio-user-issue-k8s.md b/content/en/altinity-kb-kubernetes/altinity-kb-istio-user-issue-k8s.md new file mode 100644 index 0000000000..5e162d8377 --- /dev/null +++ b/content/en/altinity-kb-kubernetes/altinity-kb-istio-user-issue-k8s.md @@ -0,0 +1,76 @@ +--- +title: "Istio Issues" +linkTitle: "Istio Issues" +weight: 100 +description: + Working with the popular service mesh +keywords: + - istio +--- + +## What is Istio? + +Per documentation on [Istio Project\'s website](https://istio.io/latest/docs/overview/what-is-istio/), Istio is "an open source service mesh that layers transparently onto existing distributed applications. Istio’s powerful features provide a uniform and more efficient way to secure, connect, and monitor services. Istio is the path to load balancing, service-to-service authentication, and monitoring – with few or no service code changes." + +Istio works quite well at providing this functionality, and does so through controlling service-to-service communication in a Cluster, fine-grained control of traffic behavior, routing rules, load-balancing, a policy layer and configuration API supporting access controls, rate limiting, etc. + +It also provides metrics about all traffic in a cluster. One can get an amazing amount of metrics from it. Datadog even has a provider that when turned on is a bit like a firehose of information. + +Istio essentially uses a proxy to intercept all network traffic and provides the ability to be configured to provide application-aware features. 
+ +## ClickHouse and Istio + +The implications for ClickHouse need to be taken into consideration, however, and this page will attempt to address this from real-life scenarios that Altinity devops, infrastructural, and support engineers have had to solve. + +### Operator High Level Description + +The Altinity ClickHouse Operator, when installed using a deployment, also creates four custom resources: + +- clickhouseinstallations.clickhouse.altinity.com (chi) +- clickhousekeeperinstallations.clickhouse-keeper.altinity.com (chk) +- clickhouseinstallationtemplates.clickhouse.altinity.com (chit) +- clickhouseoperatorconfigurations.clickhouse.altinity.com (chopconf) + +For the first two, it uses StatefulSets to run both Keeper and ClickHouse clusters. For Keeper, it manages how many replicas are specified, and for ClickHouse, it manages both how many replicas and shards are specified. + +In managing `ClickHouseInstallations`, it requires that the operator can interact with the database running on clusters it creates using a specific `clickhouse_operator` user and needs network access rules that allow connection to the ClickHouse pods. + +Many of the issues with Istio arise when this becomes a problem, particularly in the case where the IP address of the Operator pod changes and is no longer allowed to connect to its ClickHouse clusters that it manages. + +### Issue: Authentication error of clickhouse-operator + +This was a ClickHouse cluster running in a Kubernetes setup with Istio. + +- The clickhouse operator was unable to query the clickhouse pods because of authentication errors. After a period of time, the operator gave up yet the ClickHouse cluster (ClickHouseInstallation) worked normally. 
+- Errors showed `AUTHENTICATION_FAILED` and `connections from :ffff:127.0.0.6 are not allowed` as well as `IP_ADDRESS_NOT_ALLOWED` +- Also, the `clickhouse_operator` user was correctly configured +- There was an issue that on the surface looked similar to a recent issue with https://altinity.com/blog/deepseek-clickhouse-and-the-altinity-kubernetes-operator (disabled network access for the default user due to an issue with DeepSeek), and one idea was that upgrading the operator would fix it (which would be true if it were the default user). +- However, the key to this issue is that the problem was with the `clickhouse_operator` user, not the `default` user, hence not due to the aforementioned issue. +- More consideration was given in light of how Istio affects which services can connect, which made it more obvious that it was an issue with using Istio in the operator rather than the operator version +- The suggestion was given to remove Istio from the clickhouse operator `ClickHouseInstallation`, and references this issue https://github.com/Altinity/clickhouse-operator/issues/1261#issuecomment-1797895080 +- The change required would be something of the sort: + +```yaml +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: clickhouse-operator +spec: + template: + metadata: + annotations: + sidecar.istio.io/inject: "false" + +--- + +apiVersion: clickhouse.altinity.com/v1 +kind: ClickHouseInstallation +metadata: + name: your-chi + annotations: + sidecar.istio.io/inject: "false" + +``` + diff --git a/content/en/altinity-kb-kubernetes/altinity-kb-possible-issues-with-running-clickhouse-in-k8s.md b/content/en/altinity-kb-kubernetes/altinity-kb-possible-issues-with-running-clickhouse-in-k8s.md index de1c5240ed..c908418304 100644 --- a/content/en/altinity-kb-kubernetes/altinity-kb-possible-issues-with-running-clickhouse-in-k8s.md +++ b/content/en/altinity-kb-kubernetes/altinity-kb-possible-issues-with-running-clickhouse-in-k8s.md @@ -1,10 +1,11 @@ --- -title: 
"Possible issues with running ClickHouse in k8s" -linkTitle: "Possible issues with running ClickHouse in k8s" +title: "Possible issues with running ClickHouse® in K8s" +linkTitle: "Possible issues with running ClickHouse® in K8s" description: > - Possible issues with running ClickHouse in k8s + Possible issues with running ClickHouse® in K8s +draft: true --- -The biggest problem with running ClickHouse in k8s, happens when clickhouse-server can't start for some reason and pod is falling in CrashloopBackOff, so you can't easily get in the pod and check/fix/restart ClickHouse. +The biggest problem with running ClickHouse® in K8s, happens when clickhouse-server can't start for some reason and pod is falling in CrashloopBackOff, so you can't easily get in the pod and check/fix/restart ClickHouse. There is multiple possible reasons for this, some of them can be fixed without manual intervention in pod: @@ -25,7 +26,7 @@ Caveats: 1. Not all configuration/state folders are being covered by persistent volumes. ([geobases](https://clickhouse.tech/docs/en/sql-reference/functions/ym-dict-functions/#multiple-geobases)) 2. Page cache belongs to k8s node and pv are being mounted to pod, in case of fast shutdown there is possibility to loss some data(needs to be clarified) -3. Some cloud providers (GKE) can have slow unlink command, which is important for clickhouse because it's needed for parts management. (`max_part_removal_threads` setting) +3. Some cloud providers (GKE) can have slow unlink command, which is important for ClickHouse because it's needed for parts management. (`max_part_removal_threads` setting) Useful commands: @@ -34,7 +35,7 @@ kubectl logs chi-chcluster-2-1-0 -c clickhouse-pod -n chcluster --previous kubectl describe pod chi-chcluster-2-1-0 -n chcluster ``` -Q. Clickhouse is caching the Kafka pod's IP and trying to connect to the same ip even when there is a new Kafka pod running and the old one is deprecated. 
Is there some setting where we could refresh the connection +Q. ClickHouse is caching the Kafka pod's IP and trying to connect to the same ip even when there is a new Kafka pod running and the old one is deprecated. Is there some setting where we could refresh the connection `1` in config.xml diff --git a/content/en/altinity-kb-queries-and-syntax/_index.md b/content/en/altinity-kb-queries-and-syntax/_index.md index e771c1d459..46d710897d 100644 --- a/content/en/altinity-kb-queries-and-syntax/_index.md +++ b/content/en/altinity-kb-queries-and-syntax/_index.md @@ -5,6 +5,6 @@ keywords: - clickhouse queries - clickhouse joins description: > - Learn about ClickHouse queries & syntax, including Joins & Window Functions. + Learn about ClickHouse® queries & syntax, including Joins & Window Functions. weight: 1 --- diff --git a/content/en/altinity-kb-queries-and-syntax/altinity-kb-alter-modify-column-is-stuck-the-column-is-inaccessible.md b/content/en/altinity-kb-queries-and-syntax/altinity-kb-alter-modify-column-is-stuck-the-column-is-inaccessible.md index 45be4c3aaf..ce63579370 100644 --- a/content/en/altinity-kb-queries-and-syntax/altinity-kb-alter-modify-column-is-stuck-the-column-is-inaccessible.md +++ b/content/en/altinity-kb-queries-and-syntax/altinity-kb-alter-modify-column-is-stuck-the-column-is-inaccessible.md @@ -6,13 +6,13 @@ description: > --- ## Problem -You have table: +You’ve created a table in ClickHouse with the following structure: ```sql CREATE TABLE modify_column(column_n String) ENGINE=MergeTree() ORDER BY tuple(); ``` -Populate it with data: +You populated the table with some data: ```sql INSERT INTO modify_column VALUES ('key_a'); @@ -20,13 +20,13 @@ INSERT INTO modify_column VALUES ('key_b'); INSERT INTO modify_column VALUES ('key_c'); ``` -Tried to apply alter table query with changing column type: +Next, you attempted to change the column type using this query: ```sql ALTER TABLE modify_column MODIFY COLUMN column_n Enum8('key_a'=1, 'key_b'=2); 
``` -But it didn’t succeed and you see an error in system.mutations table: +However, the operation failed, and you encountered an error when inspecting the system.mutations table: ```sql SELECT * @@ -51,7 +51,12 @@ latest_fail_time: 2021-03-03 18:38:59 latest_fail_reason: Code: 36, e.displayText() = DB::Exception: Unknown element 'key_c' for type Enum8('key_a' = 1, 'key_b' = 2): while executing 'FUNCTION CAST(column_n :: 0, 'Enum8(\'key_a\' = 1, \'key_b\' = 2)' :: 1) -> cast(column_n, 'Enum8(\'key_a\' = 1, \'key_b\' = 2)') Enum8('key_a' = 1, 'key_b' = 2) : 2': (while reading from part /var/lib/clickhouse/data/default/modify_column/all_3_3_0/): While executing MergeTree (version 21.3.1.6041) ``` -And you can’t query that column anymore: +The mutation result showed an error indicating that the value 'key_c' was not recognized in the Enum8 definition: +```sql +Unknown element 'key_c' for type Enum8('key_a' = 1, 'key_b' = 2) +``` + +Now, when trying to query the column, ClickHouse returns an exception and the column becomes inaccessible: ```sql SELECT column_n @@ -70,36 +75,54 @@ Received exception from server (version 21.3.1): Code: 36. DB::Exception: Received from localhost:9000. DB::Exception: Unknown element 'key_c' for type Enum8('key_a' = 1, 'key_b' = 2): while executing 'FUNCTION CAST(column_n :: 0, 'Enum8(\'key_a\' = 1, \'key_b\' = 2)' :: 1) -> cast(column_n, 'Enum8(\'key_a\' = 1, \'key_b\' = 2)') Enum8('key_a' = 1, 'key_b' = 2) : 2': (while reading from part /var/lib/clickhouse/data/default/modify_column/all_3_3_0/): While executing MergeTreeThread. ``` -### Solution +This query results in: +```sql +Code: 36. DB::Exception: Unknown element 'key_c' for type Enum8('key_a' = 1, 'key_b' = 2) +``` -You should do the following: +### Root Cause +The failure occurred because the Enum8 type only allows for predefined values. Since 'key_c' wasn't included in the definition, the mutation failed and left the table in an inconsistent state. 
+ +### Solution -Check which mutation is stuck and kill it: +1. Identify and Terminate the Stuck Mutation +First, you need to locate the mutation that’s stuck in an incomplete state. ```sql SELECT * FROM system.mutations WHERE table = 'modify_column' AND is_done=0 FORMAT Vertical; +``` + +Once you’ve identified the mutation, terminate it using: +```sql KILL MUTATION WHERE table = 'modify_column' AND mutation_id = 'id_of_stuck_mutation'; ``` +This will stop the operation and allow you to revert the changes. -Apply reverting modify column query to convert table to previous column type: +2. Revert the Column Type +Next, revert the column back to its original type, which was String, to restore the table’s accessibility: ```sql ALTER TABLE modify_column MODIFY COLUMN column_n String; ``` -Check if column is accessible now: +3. Verify the Column is Accessible Again +To ensure the column is functioning normally, run a simple query to verify its data: ```sql SELECT column_n, count() FROM modify_column GROUP BY column_n; ``` -Run fixed ALTER MODIFY COLUMN query. +4. Apply the Correct Column Modification +Now that the column is accessible, you can safely reapply the ALTER query, but this time include all the required enum values: ```sql ALTER TABLE modify_column MODIFY COLUMN column_n Enum8('key_a'=1, 'key_b'=2, 'key_c'=3); ``` -You can monitor progress of column type change with system.mutations or system.parts_columns tables: +5. 
Monitor Progress +You can monitor the progress of the column modification using the system.mutations or system.parts_columns tables to ensure everything proceeds as expected: + +To track mutation progress: ```sql SELECT @@ -107,8 +130,12 @@ SELECT parts_to_do, is_done FROM system.mutations -WHERE table = 'modify_column' +WHERE table = 'modify_column'; +``` + +To review the column's active parts: +```sql SELECT column, type, @@ -119,5 +146,5 @@ FROM system.parts_columns WHERE (table = 'modify_column') AND (column = 'column_n') AND active GROUP BY column, - type + type; ``` diff --git a/content/en/altinity-kb-queries-and-syntax/altinity-kb-final-clause-speed.md b/content/en/altinity-kb-queries-and-syntax/altinity-kb-final-clause-speed.md index ade3331949..64d833fa81 100644 --- a/content/en/altinity-kb-queries-and-syntax/altinity-kb-final-clause-speed.md +++ b/content/en/altinity-kb-queries-and-syntax/altinity-kb-final-clause-speed.md @@ -6,18 +6,32 @@ description: > --- `SELECT * FROM table FINAL` -* Before 20.5 - always executed in a single thread and slow. +### History + +* Before ClickHouse® 20.5 - always executed in a single thread and slow. * Since 20.5 - final can be parallel, see [https://github.com/ClickHouse/ClickHouse/pull/10463](https://github.com/ClickHouse/ClickHouse/pull/10463) -* Since 20.10 - you can use `do_not_merge_across_partitions_select_final` setting. -* Sinse 22.6 - final even more parallel, see [https://github.com/ClickHouse/ClickHouse/pull/36396](https://github.com/ClickHouse/ClickHouse/pull/36396) +* Since 20.10 - you can use `do_not_merge_across_partitions_select_final` setting. 
See [https://github.com/ClickHouse/ClickHouse/pull/15938](https://github.com/ClickHouse/ClickHouse/pull/15938) and [https://github.com/ClickHouse/ClickHouse/issues/11722](https://github.com/ClickHouse/ClickHouse/issues/11722) +* Since 22.6 - final even more parallel, see [https://github.com/ClickHouse/ClickHouse/pull/36396](https://github.com/ClickHouse/ClickHouse/pull/36396) +* Since 22.8 - final doesn't read excessive data, see [https://github.com/ClickHouse/ClickHouse/pull/47801](https://github.com/ClickHouse/ClickHouse/pull/47801) +* Since 23.5 - final use less memory, see [https://github.com/ClickHouse/ClickHouse/pull/50429](https://github.com/ClickHouse/ClickHouse/pull/50429) +* Since 23.9 - final doesn't read PK columns if unneeded ie only one part in partition, see [https://github.com/ClickHouse/ClickHouse/pull/53919](https://github.com/ClickHouse/ClickHouse/pull/53919) +* Since 23.12 - final applied only for intersecting ranges of parts, see [https://github.com/ClickHouse/ClickHouse/pull/58120](https://github.com/ClickHouse/ClickHouse/pull/58120) +* Since 24.1 - final doesn't compare rows from the same part with level > 0, see [https://github.com/ClickHouse/ClickHouse/pull/58142](https://github.com/ClickHouse/ClickHouse/pull/58142) +* Since 24.1 - final use vertical algorithm (more cache friendly), see [https://github.com/ClickHouse/ClickHouse/pull/54366](https://github.com/ClickHouse/ClickHouse/pull/54366) +* Since 25.6 - final supports Additional Skip Indexes, see [https://github.com/ClickHouse/ClickHouse/pull/78350](https://github.com/ClickHouse/ClickHouse/pull/78350) + + +### Partitioning -See [https://github.com/ClickHouse/ClickHouse/pull/15938](https://github.com/ClickHouse/ClickHouse/pull/15938) and [https://github.com/ClickHouse/ClickHouse/issues/11722](https://github.com/ClickHouse/ClickHouse/issues/11722) +Proper partition design could speed up FINAL processing. 
-So it can work in the following way: +For example, if you have a table with Daily partitioning, you can: +- After day end + some time interval during which you can get some updates run `OPTIMIZE TABLE xxx PARTITION 'prev_day' FINAL` +- or add table SETTINGS min_age_to_force_merge_seconds=86400,min_age_to_force_merge_on_partition_only=1 + +In that case, using FINAL with `do_not_merge_across_partitions_select_final` will be cheap or even zero. -1. Daily partitioning -2. After day end + some time interval during which you can get some updates - for example at 3am / 6am you do `OPTIMIZE TABLE xxx PARTITION 'prev_day' FINAL` -3. In that case using that FINAL with `do_not_merge_across_partitions_select_final` will be cheap. +Example: ```sql DROP TABLE IF EXISTS repl_tbl; @@ -81,3 +95,33 @@ SELECT count() FROM repl_tbl FINAL WHERE NOT ignore(*) /* only 0.35 sec slower, and while partitions have about the same size that extra cost will be about constant */ ``` + +### Light ORDER BY + +All columns specified in ORDER BY will be read during FINAL processing, creating additional disk load. Use fewer columns and lighter column types to create faster queries. 
+ +Example: UUID vs UInt64 +``` +CREATE TABLE uuid_table (id UUID, value UInt64) ENGINE = ReplacingMergeTree() ORDER BY id; +CREATE TABLE uint64_table (id UInt64,value UInt64) ENGINE = ReplacingMergeTree() ORDER BY id; + +INSERT INTO uuid_table SELECT generateUUIDv4(), number FROM numbers(5E7); +INSERT INTO uint64_table SELECT number, number FROM numbers(5E7); + +SELECT sum(value) FROM uuid_table FINAL format JSON; +SELECT sum(value) FROM uint64_table FINAL format JSON; +``` +[Results](https://fiddle.clickhouse.com/e2441e5d-ccb6-4f67-bee0-7cc2c4e3f43e): +``` + "elapsed": 0.58738197, + "rows_read": 50172032, + "bytes_read": 1204128768 + + "elapsed": 0.189792142, + "rows_read": 50057344, + "bytes_read": 480675040 +``` + + + + diff --git a/content/en/altinity-kb-queries-and-syntax/altinity-kb-kill-query.md b/content/en/altinity-kb-queries-and-syntax/altinity-kb-kill-query.md index 8cb6d48148..255bc07de0 100644 --- a/content/en/altinity-kb-queries-and-syntax/altinity-kb-kill-query.md +++ b/content/en/altinity-kb-queries-and-syntax/altinity-kb-kill-query.md @@ -7,12 +7,12 @@ description: > Unfortunately not all queries can be killed. `KILL QUERY` only sets a flag that must be checked by the query. A query pipeline is checking this flag before a switching to next block. If the pipeline has stuck somewhere in the middle it cannot be killed. -If a query does not stop, the only way to get rid of it is to restart ClickHouse. +If a query does not stop, the only way to get rid of it is to restart ClickHouse®. 
-See also
+See also:

-[https://github.com/ClickHouse/ClickHouse/issues/3964](https://github.com/ClickHouse/ClickHouse/issues/3964)
-[https://github.com/ClickHouse/ClickHouse/issues/1576](https://github.com/ClickHouse/ClickHouse/issues/1576)
+* [https://github.com/ClickHouse/ClickHouse/issues/3964](https://github.com/ClickHouse/ClickHouse/issues/3964)
+* [https://github.com/ClickHouse/ClickHouse/issues/1576](https://github.com/ClickHouse/ClickHouse/issues/1576)

## How to replace a running query
diff --git a/content/en/altinity-kb-queries-and-syntax/altinity-kb-optimize-vs-optimize-final.md b/content/en/altinity-kb-queries-and-syntax/altinity-kb-optimize-vs-optimize-final.md
index 8d525b7bf0..a2f0a245a8 100644
--- a/content/en/altinity-kb-queries-and-syntax/altinity-kb-optimize-vs-optimize-final.md
+++ b/content/en/altinity-kb-queries-and-syntax/altinity-kb-optimize-vs-optimize-final.md
@@ -12,7 +12,7 @@ You have 40 parts in 3 partitions. This unscheduled merge selects some partition

`OPTIMIZE TABLE xyz FINAL` -- initiates a cycle of unscheduled merges.

-ClickHouse merges parts in this table until will remains 1 part in each partition (if a system has enough free disk space). As a result, you get 3 parts, 1 part per partition. In this case, CH rewrites parts even if they are already merged into a single part. It creates a huge CPU / Disk load if the table ( XYZ) is huge. ClickHouse reads / uncompress / merge / compress / writes all data in the table.
+ClickHouse® merges parts in this table until only 1 part remains in each partition (if a system has enough free disk space). As a result, you get 3 parts, 1 part per partition. In this case, ClickHouse rewrites parts even if they are already merged into a single part. It creates a huge CPU / Disk load if the table (XYZ) is huge. ClickHouse reads / uncompress / merge / compress / writes all data in the table.

If this table has size 1TB it could take around 3 hours to complete.
diff --git a/content/en/altinity-kb-queries-and-syntax/altinity-kb-parameterized-views.md b/content/en/altinity-kb-queries-and-syntax/altinity-kb-parameterized-views.md index ab0499f8e3..27b1a53b7b 100644 --- a/content/en/altinity-kb-queries-and-syntax/altinity-kb-parameterized-views.md +++ b/content/en/altinity-kb-queries-and-syntax/altinity-kb-parameterized-views.md @@ -4,6 +4,36 @@ linkTitle: "Parameterized views" description: > Parameterized views --- + +ClickHouse® versions 23.1+ (23.1.6.42, 23.2.5.46, 23.3.1.2823) +have inbuilt support for [parametrized views](https://clickhouse.com/docs/en/sql-reference/statements/create/view#parameterized-view): + +```sql +CREATE VIEW my_new_view AS +SELECT * +FROM deals +WHERE category_id IN ( + SELECT category_id + FROM deal_categories + WHERE category = {category:String} +) + +SELECT * FROM my_new_view(category = 'hot deals'); +``` +### One more example + +```sql +CREATE OR REPLACE VIEW v AS SELECT 1::UInt32 x WHERE x IN ({xx:Array(UInt32)}); + +select * from v(xx=[1,2,3]); +┌─x─┐ +│ 1 │ +└───┘ +``` + + +## ClickHouse versions pre 23.1 + Custom settings allows to emulate parameterized views. You need to enable custom settings and define any prefixes for settings. diff --git a/content/en/altinity-kb-queries-and-syntax/altinity-kb-possible-deadlock-avoided.-client-should-retry.md b/content/en/altinity-kb-queries-and-syntax/altinity-kb-possible-deadlock-avoided.-client-should-retry.md index cd8727780c..e5c1be17b0 100644 --- a/content/en/altinity-kb-queries-and-syntax/altinity-kb-possible-deadlock-avoided.-client-should-retry.md +++ b/content/en/altinity-kb-queries-and-syntax/altinity-kb-possible-deadlock-avoided.-client-should-retry.md @@ -4,7 +4,7 @@ linkTitle: "Possible deadlock avoided. Client should retry" description: > Possible deadlock avoided. Client should retry --- -In version 19.14 a serious issue was found: a race condition that can lead to server deadlock. 
The reason for that was quite fundamental, and a temporary workaround for that was added ("possible deadlock avoided").
+In ClickHouse® version 19.14 a serious issue was found: a race condition that can lead to server deadlock. The reason for that was quite fundamental, and a temporary workaround for that was added ("possible deadlock avoided").

Those locks are one of the fundamental things that the core team was actively working on in 2020.

@@ -20,4 +20,8 @@ In 20.6 all table-level locks which were possible to remove were removed, so alt

Typically issue was happening when doing some concurrent select on `system.parts` / `system.columns` / `system.table` with simultaneous table manipulations (doing some kind of ALTERS / TRUNCATES / DROP)I

-If that exception happens often in your use-case: An update is recommended. In the meantime, check which queries are running (especially to system.tables / system.parts and other system tables) and check if killing them / avoiding them helps to solve the issue.
+If that exception happens often in your use-case:
+- use recent ClickHouse versions
+- ensure you use Atomic engine for the database (not Ordinary) (can be checked in system.databases)
+
+Sometimes you can try to work around the issue by finding the queries which use that table concurrently (especially to system.tables / system.parts and other system tables) and try killing them (or avoiding them).
diff --git a/content/en/altinity-kb-queries-and-syntax/altinity-kb-sample-by.md b/content/en/altinity-kb-queries-and-syntax/altinity-kb-sample-by.md
index 6dfa4cecbe..7f7e010091 100644
--- a/content/en/altinity-kb-queries-and-syntax/altinity-kb-sample-by.md
+++ b/content/en/altinity-kb-queries-and-syntax/altinity-kb-sample-by.md
@@ -8,17 +8,17 @@ The execution pipeline is embedded in the partition reading code.

So that works this way:

-1. ClickHouse does partition pruning based on `WHERE` conditions.
+1. ClickHouse® does partition pruning based on `WHERE` conditions.
2.
For every partition, it picks a columns ranges (aka 'marks' / 'granulas') based on primary key conditions. 3. Here the sampling logic is applied: a) in case of `SAMPLE k` (`k` in `0..1` range) it adds conditions `WHERE sample_key < k * max_int_of_sample_key_type` b) in case of `SAMPLE k OFFSET m` it adds conditions `WHERE sample_key BETWEEN m * max_int_of_sample_key_type AND (m + k) * max_int_of_sample_key_type`c) in case of `SAMPLE N` (N>1) if first estimates how many rows are inside the range we need to read and based on that convert it to 3a case (calculate k based on number of rows in ranges and desired number of rows) 4. on the data returned by those other conditions are applied (so here the number of rows can be decreased here) -[Source Code](https://github.com/ClickHouse/ClickHouse/blob/92c937db8b50844c7216d93c5c398d376e82f6c3/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp#L355) +* [Source Code](https://github.com/ClickHouse/ClickHouse/blob/92c937db8b50844c7216d93c5c398d376e82f6c3/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp#L355) ## SAMPLE by -[Docs](https://clickhouse.yandex/docs/en/query_language/select/#select-sample-clause) -[Source Code](https://github.com/ClickHouse/ClickHouse/blob/92c937db8b50844c7216d93c5c398d376e82f6c3/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp#L355) +* [Docs](https://clickhouse.yandex/docs/en/query_language/select/#select-sample-clause) +* [Source Code](https://github.com/ClickHouse/ClickHouse/blob/92c937db8b50844c7216d93c5c398d376e82f6c3/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp#L355) SAMPLE key Must be: @@ -56,4 +56,4 @@ SELECT count() FROM table WHERE ... AND cityHash64(some_high_card_key) % 10 = 0; SELECT count() FROM table WHERE ... AND rand() % 10 = 0; -- Non-deterministic ``` -ClickHouse will read more data from disk compared to an example with a good SAMPLE key, but it's more universal and can be used if you can't change table ORDER BY key. 
\ No newline at end of file +ClickHouse will read more data from disk compared to an example with a good SAMPLE key, but it's more universal and can be used if you can't change table ORDER BY key. (To learn more about ClickHouse internals, [Administrator Training for ClickHouse](https://altinity.com/clickhouse-training/) is available.) \ No newline at end of file diff --git a/content/en/altinity-kb-queries-and-syntax/ansi-sql-mode.md b/content/en/altinity-kb-queries-and-syntax/ansi-sql-mode.md index 61d3973b5d..ab4306ecda 100644 --- a/content/en/altinity-kb-queries-and-syntax/ansi-sql-mode.md +++ b/content/en/altinity-kb-queries-and-syntax/ansi-sql-mode.md @@ -4,13 +4,49 @@ linkTitle: "ANSI SQL mode" description: > ANSI SQL mode --- -It's possible to tune some settings which would make ClickHouse more ANSI SQL compatible(and slower): +To make ClickHouse® more compatible with ANSI SQL standards (at the expense of some performance), you can adjust several settings. These configurations will bring ClickHouse closer to ANSI SQL behavior but may introduce a slowdown in query performance: ```sql -SET join_use_nulls=1; -- introduced long ago -SET cast_keep_nullable=1; -- introduced in 20.5 -SET union_default_mode='DISTINCT'; -- introduced in 21.1 -SET allow_experimental_window_functions=1; -- introduced in 21.3 -SET prefer_column_name_to_alias=1; -- introduced in 21.4; -SET group_by_use_nulls=1; -- introduced in 22.7; +join_use_nulls=1 ``` +Introduced in: early versions +Ensures that JOIN operations return NULL for non-matching rows, aligning with standard SQL behavior. + + +```sql +cast_keep_nullable=1 +``` +Introduced in: v20.5 +Preserves the NULL flag when casting between data types, which is typical in ANSI SQL. + + +```sql +union_default_mode='DISTINCT' +``` +Introduced in: v21.1 +Makes the UNION operation default to UNION DISTINCT, which removes duplicate rows, following ANSI SQL behavior. 
+ + +```sql +allow_experimental_window_functions=1 +``` +Introduced in: v21.3 +Enables support for window functions, which are a standard feature in ANSI SQL. + + +```sql +prefer_column_name_to_alias=1 +``` +Introduced in: v21.4 +This setting resolves ambiguities by preferring column names over aliases, following ANSI SQL conventions. + + +```sql +group_by_use_nulls=1 +``` +Introduced in: v22.7 +Allows NULL values to appear in the GROUP BY clause, consistent with ANSI SQL behavior. + +By enabling these settings, ClickHouse becomes more ANSI SQL-compliant, although this may come with a trade-off in terms of performance. Each of these options can be enabled as needed, based on the specific SQL compatibility requirements of your application. + + diff --git a/content/en/altinity-kb-queries-and-syntax/array-functions-as-window.md b/content/en/altinity-kb-queries-and-syntax/array-functions-as-window.md index 8a66dde750..73a08bdca8 100644 --- a/content/en/altinity-kb-queries-and-syntax/array-functions-as-window.md +++ b/content/en/altinity-kb-queries-and-syntax/array-functions-as-window.md @@ -2,15 +2,14 @@ title: "Using array functions to mimic window-functions alike behavior" linkTitle: "Using array functions to mimic window-functions alike behavior" weight: 100 -description: >- - Using array functions to mimic window-functions alike behavior. --- -# Using array functions to mimic window functions alike behavior +There are cases where you may need to mimic window functions using arrays in ClickHouse. This could be for optimization purposes, to better manage memory, or to enable on-disk spilling, especially if you’re working with an older version of ClickHouse that doesn't natively support window functions. -There are some usecases when you may want to mimic window functions using Arrays - as an optimization step, or to contol the memory better / use on-disk spiling, or just if you have old ClickHouse version. 
+Here’s an example demonstrating how to mimic a window function like runningDifference() using arrays: -## Running difference sample +#### Step 1: Create Sample Data +We’ll start by creating a test table with some sample data: ```sql DROP TABLE IS EXISTS test_running_difference @@ -24,10 +23,8 @@ SELECT FROM numbers(100) -SELECT * FROM test_running_difference -``` +SELECT * FROM test_running_difference; -```text ┌─id─┬──────────────────ts─┬────val─┐ │ 0 │ 2010-01-01 00:00:00 │ -1209 │ │ 1 │ 2010-01-01 00:00:00 │ 43 │ @@ -134,13 +131,15 @@ SELECT * FROM test_running_difference 100 rows in set. Elapsed: 0.003 sec. ``` +This table contains IDs, timestamps (ts), and values (val), where each id appears multiple times with different timestamps. + +#### Step 2: Running Difference Example +If you try using runningDifference directly, it works block by block, which can be problematic when the data needs to be ordered or when group changes occur. + -runningDifference works only in blocks & require ordered data & problematic when group changes ```sql select id, val, runningDifference(val) from (select * from test_running_difference order by id, ts); -``` -``` ┌─id─┬────val─┬─runningDifference(val)─┐ │ 0 │ -1209 │ 0 │ │ 0 │ 66839 │ 68048 │ @@ -248,13 +247,15 @@ select id, val, runningDifference(val) from (select * from test_running_differen ``` -## Arrays ! +The output may look inconsistent because runningDifference requires ordered data within blocks. -### 1. Group & Collect the data into array +#### Step 3: Using Arrays for Grouping and Calculation +Instead of using runningDifference, we can utilize arrays to group data, sort it, and apply similar logic more efficiently. +**Grouping Data into Arrays** - +You can group multiple columns into arrays by using the groupArray function. 
For example, to collect several columns as arrays of tuples, you can use the following query: -you can collect several column by builing array of tuples: -``` +```sql SELECT id, groupArray(tuple(ts, val)) @@ -285,10 +286,9 @@ GROUP BY id └────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -### Do needed ordering in each array - -For example - by second element of tuple: -``` +**Sorting Arrays** - +To sort the arrays by a specific element, for example, by the second element of the tuple, you can use the arraySort function: +```sql SELECT id, arraySort(x -> (x.2), groupArray((ts, val))) @@ -321,9 +321,11 @@ GROUP BY id 20 rows in set. Elapsed: 0.004 sec. ``` -That can be rewritten like this: +This sorts each array by the val (second element of the tuple) for each id. -``` +Simplified Sorting Example - We can rewrite the query in a more concise way using WITH clauses for better readability: + +```sql WITH groupArray(tuple(ts, val)) as window_rows, arraySort(x -> x.1, window_rows) as sorted_window_rows @@ -334,9 +336,10 @@ FROM test_running_difference GROUP BY id ``` -### Apply needed logic arrayMap / arrayDifference etc +**Applying Calculations with Arrays** - +Once the data is sorted, you can apply array functions like arrayMap and arrayDifference to calculate differences between values in the arrays: -``` +```sql WITH groupArray(tuple(ts, val)) as window_rows, arraySort(x -> x.1, window_rows) as sorted_window_rows, @@ -347,10 +350,7 @@ SELECT sorted_window_rows_val_column_diff FROM test_running_difference GROUP BY id -``` - -``` ┌─id─┬─sorted_window_rows_val_column_diff─┐ │ 0 │ [0,68048,68243,72389,67860] │ │ 1 │ [0,19397,17905,16978,18345] │ @@ -380,10 +380,8 @@ GROUP BY id You can do also a lot of magic with arrayEnumerate and accessing different values by their ids. 
-### Now you can return you arrays back to rows - - -use arrayJoin +**Reverting Arrays Back to Rows** - +You can convert the arrays back into rows using arrayJoin: ```sql WITH @@ -398,9 +396,7 @@ SELECT FROM test_running_difference GROUP BY id ``` - - - or ARRAY JOIN +Or use ARRAY JOIN to join the arrays back to the original structure: ```sql SELECT @@ -421,8 +417,6 @@ FROM test_running_difference GROUP BY id ) as t1 ARRAY JOIN sorted_window_rows_val_column_diff as diff, sorted_window_rows_ts_column as ts - ``` - -etc. +This allows you to manipulate and analyze data within arrays effectively, using powerful functions such as arrayMap, arrayDifference, and arrayEnumerate. diff --git a/content/en/altinity-kb-queries-and-syntax/async-inserts.md b/content/en/altinity-kb-queries-and-syntax/async-inserts.md new file mode 100644 index 0000000000..2bb0486557 --- /dev/null +++ b/content/en/altinity-kb-queries-and-syntax/async-inserts.md @@ -0,0 +1,157 @@ +--- +title: "Async INSERTs" +linkTitle: "Async INSERTs" +description: > + Comprehensive guide to ClickHouse Async INSERTs - configuration, best practices, and monitoring +--- + +## Overview + +Async INSERTs is a ClickHouse® feature that enables automatic server-side batching of data. While we generally recommend batching at the application/ingestor level for better control and decoupling, async inserts are valuable when you have hundreds or thousands of clients performing small inserts and client-side batching is not feasible. + +**Key Documentation:** [Official Async Inserts Documentation](https://clickhouse.com/docs/en/optimize/asynchronous-inserts) + +## How Async Inserts Work + +When `async_insert=1` is enabled, ClickHouse buffers incoming inserts and flushes them to disk when one of these conditions is met: +1. Buffer reaches specified size (`async_insert_max_data_size`) +2. Time threshold elapses (`async_insert_busy_timeout_ms`) +3. 
Maximum number of queries accumulate (`async_insert_max_query_number`) + +## Critical Configuration Settings + +### Core Settings + +```sql +-- Enable async inserts (0=disabled, 1=enabled) +SET async_insert = 1; + +-- Wait behavior (STRONGLY RECOMMENDED: use 1) +-- 0 = fire-and-forget mode (risky - no error feedback) +-- 1 = wait for data to be written to storage +SET wait_for_async_insert = 1; + +-- Buffer flush conditions +SET async_insert_max_data_size = 1000000; -- 1MB default +SET async_insert_busy_timeout_ms = 1000; -- 1 second +SET async_insert_max_query_number = 100; -- max queries before flush +``` + +### Adaptive Timeout (Since 24.3) + +```sql +-- Adaptive timeout automatically adjusts flush timing based on server load +-- Default: 1 (enabled) - OVERRIDES manual timeout settings +-- Set to 0 for deterministic behavior with manual settings +SET async_insert_use_adaptive_busy_timeout = 0; +``` + +## Important Behavioral Notes + +### What Works and What Doesn't + +✅ **Works with Async Inserts:** +- Direct INSERT with VALUES +- INSERT with FORMAT (JSONEachRow, CSV, etc.) +- Native protocol inserts (since 22.x) + +❌ **Does NOT Work:** +- `INSERT .. SELECT` statements - Other strategies are needed for managing performance and load. Do not use `async_insert`. 
+ +### Data Safety Considerations + +**ALWAYS use `wait_for_async_insert = 1` in production!** + +Risks with `wait_for_async_insert = 0`: +- **Silent data loss** on errors (read-only table, disk full, too many parts) +- Data loss on sudden restart (no fsync by default) +- Data not immediately queryable after acknowledgment +- No error feedback to client + +### Deduplication Behavior + +- **Sync inserts:** Automatic deduplication enabled by default +- **Async inserts:** Deduplication disabled by default +- Enable with `async_insert_deduplicate = 1` (since 22.x) +- **Warning:** Don't use with `deduplicate_blocks_in_dependent_materialized_views = 1` + +# features / improvements + +* Async insert dedup: Support block deduplication for asynchronous inserts. Before this change, async inserts did not support deduplication, because multiple small inserts coexisted in one inserted batch: + - [#38075](https://github.com/ClickHouse/ClickHouse/issues/38075) + - [#43304](https://github.com/ClickHouse/ClickHouse/pull/43304) +* Added system table `asynchronous_insert_log`. It contains information about asynchronous inserts (including results of queries in fire-and-forget mode. 
(with wait_for_async_insert=0)) for better introspection [#42040](https://github.com/ClickHouse/ClickHouse/pull/42040)
+* Support async inserts in **clickhouse-client** for queries with inlined data **(Native protocol)**:
+  - [#34267](https://github.com/ClickHouse/ClickHouse/pull/34267)
+  - [#54098](https://github.com/ClickHouse/ClickHouse/issues/54098)
+  - [#54381](https://github.com/ClickHouse/ClickHouse/issues/54381)
+* Async insert backpressure [#47623](https://github.com/ClickHouse/ClickHouse/issues/47623)
+* Limit the deduplication overhead when using `async_insert_deduplicate` [#46549](https://github.com/ClickHouse/ClickHouse/pull/46549)
+* `SYSTEM FLUSH ASYNC INSERTS` [#49160](https://github.com/ClickHouse/ClickHouse/pull/49160)
+* Adjustable asynchronous insert timeouts [#58486](https://github.com/ClickHouse/ClickHouse/pull/58486)
+
+
+## bugfixes
+
+- Fixed bug which could lead to deadlock while using asynchronous inserts [#43233](https://github.com/ClickHouse/ClickHouse/pull/43233).
+- Fix crash when async inserts with deduplication are used for ReplicatedMergeTree tables using a nondefault merging algorithm [#51676](https://github.com/ClickHouse/ClickHouse/pull/51676)
+- Async inserts not working with log_comment setting [#48430](https://github.com/ClickHouse/ClickHouse/issues/48430)
+- Fix misbehaviour with async inserts with deduplication [#50663](https://github.com/ClickHouse/ClickHouse/pull/50663)
+- Reject Insert if `async_insert=1` and `deduplicate_blocks_in_dependent_materialized_views=1` [#60888](https://github.com/ClickHouse/ClickHouse/pull/60888)
+- Disable `async_insert_use_adaptive_busy_timeout` correctly with compatibility settings [#61486](https://github.com/ClickHouse/ClickHouse/pull/61468)
+
+
+## observability / introspection
+
+In 22.x versions, it is not possible to relate `part_log/query_id` column with `asynchronous_insert_log/query_id` column.
We need to use `query_log/query_id`:
+
+`asynchronous_insert_log` shows up the `query_id` and `flush_query_id` of each async insert. The `query_id` from `asynchronous_insert_log` shows up in the `system.query_log` as `type = 'QueryStart'` but the same `query_id` does not show up in the `query_id` column of the `system.part_log`. Because the `query_id` column in the `part_log` is the identifier of the INSERT query that created a data part, and it seems it is for sync INSERTS but not for async inserts.
+
+So in `asynchronous_inserts` table you can check the current batch that still has not been flushed. In the `asynchronous_insert_log` you can find a log of all the flushed async inserts.
+
+This has been improved in **ClickHouse 23.7**: Flush queries for async inserts (the queries that do the final push of data) are now logged in the `system.query_log` where they appear as `query_kind = 'AsyncInsertFlush'` [#51160](https://github.com/ClickHouse/ClickHouse/pull/51160)
+
+
+## Versions
+
+- **23.8** is a good version to start using async inserts because of the improvements and bugfixes.
+- **24.3** the new adaptive timeout mechanism has been added so ClickHouse will throttle the inserts based on the server load. [#58486](https://github.com/ClickHouse/ClickHouse/pull/58486) This new feature is enabled by default and will OVERRIDE current async insert settings, so better to disable it if your async insert settings are working. Here's how to do it in a clickhouse-client session: `SET async_insert_use_adaptive_busy_timeout = 0;` You can also add it as a setting on the INSERT or as a profile setting.
+ + +## Metrics + +```sql +SELECT name +FROM system.columns +WHERE (`table` = 'metric_log') AND ((name ILIKE '%asyncinsert%') OR (name ILIKE '%asynchronousinsert%')) + +┌─name─────────────────────────────────────────────┐ +│ ProfileEvent_AsyncInsertQuery │ +│ ProfileEvent_AsyncInsertBytes │ +│ ProfileEvent_AsyncInsertRows │ +│ ProfileEvent_AsyncInsertCacheHits │ +│ ProfileEvent_FailedAsyncInsertQuery │ +│ ProfileEvent_DistributedAsyncInsertionFailures │ +│ CurrentMetric_AsynchronousInsertThreads │ +│ CurrentMetric_AsynchronousInsertThreadsActive │ +│ CurrentMetric_AsynchronousInsertThreadsScheduled │ +│ CurrentMetric_AsynchronousInsertQueueSize │ +│ CurrentMetric_AsynchronousInsertQueueBytes │ +│ CurrentMetric_PendingAsyncInsert │ +│ CurrentMetric_AsyncInsertCacheSize │ +└──────────────────────────────────────────────────┘ + +SELECT * +FROM system.metrics +WHERE (metric ILIKE '%asyncinsert%') OR (metric ILIKE '%asynchronousinsert%') + +┌─metric─────────────────────────────┬─value─┬─description─────────────────────────────────────────────────────────────┐ +│ AsynchronousInsertThreads │ 1 │ Number of threads in the AsynchronousInsert thread pool. │ +│ AsynchronousInsertThreadsActive │ 0 │ Number of threads in the AsynchronousInsert thread pool running a task. │ +│ AsynchronousInsertThreadsScheduled │ 0 │ Number of queued or active jobs in the AsynchronousInsert thread pool. │ +│ AsynchronousInsertQueueSize │ 1 │ Number of pending tasks in the AsynchronousInsert queue. │ +│ AsynchronousInsertQueueBytes │ 680 │ Number of pending bytes in the AsynchronousInsert queue. │ +│ PendingAsyncInsert │ 7 │ Number of asynchronous inserts that are waiting for flush. 
│ +│ AsyncInsertCacheSize │ 0 │ Number of async insert hash id in cache │ +└────────────────────────────────────┴───────┴─────────────────────────────────────────────────────────────────────────┘ +``` diff --git a/content/en/altinity-kb-queries-and-syntax/atomic-insert.md b/content/en/altinity-kb-queries-and-syntax/atomic-insert.md index 91351e2dfa..61a96e7d42 100644 --- a/content/en/altinity-kb-queries-and-syntax/atomic-insert.md +++ b/content/en/altinity-kb-queries-and-syntax/atomic-insert.md @@ -10,12 +10,18 @@ An insert will create one part if: * Data is inserted directly into a MergeTree table * Data is inserted into a single partition. +* Smaller blocks are properly squashed up to the configured block size (`min_insert_block_size_rows` and `min_insert_block_size_bytes`) * For INSERT FORMAT: * Number of rows is less than `max_insert_block_size` (default is `1048545`) - * Parallel formatting is disabled (For TSV, TKSV, CSV, and JSONEachRow formats setting `input_format_parallel_parsing=0` is set). -* For INSERT SELECT: - * Number of rows is less than `max_block_size` -* Smaller blocks are properly squashed up to the configured block size (`min_insert_block_size_rows` and `min_insert_block_size_bytes`) + * Parallel formatting is disabled (For TSV, TSKV, CSV, and JSONEachRow formats setting `input_format_parallel_parsing=0` is set). +* For INSERT SELECT (including all variants with table functions), data for insert should be created fully deterministically. 
+ * non-deterministic functions there like rand() not used in SELECT + * Number of rows/bytes is less than `min_insert_block_size_rows` and `min_insert_block_size_bytes` + * And one of: + * setting max_threads to 1 + * adding ORDER BY to the table's DDL (not ordering by tuple) + * There is some ORDER BY inside SELECT + * See [example](https://fiddle.clickhouse.com/48d38d3d-668d-4513-ba21-e595276b3136) * The MergeTree table doesn't have Materialized Views (there is no atomicity Table <> MV) https://github.com/ClickHouse/ClickHouse/issues/9195#issuecomment-587500824 @@ -25,23 +31,23 @@ https://github.com/ClickHouse/ClickHouse/issues/5148#issuecomment-487757235 ### Generate test data in Native and TSV format ( 100 millions rows ) -Text formats and Native format require different set of settings, here I want to find / demonstrate mandatory minumum of settings for any case. +Text formats and Native format require different set of settings, here I want to find / demonstrate mandatory minimum of settings for any case. 
```bash clickhouse-client -q \ - 'select toInt64(number) A, toString(number) S from numbers(100000000) format Native' > t.native + 'SELECT toInt64(number) A, toString(number) S FROM numbers(100000000) FORMAT Native' > t.native clickhouse-client -q \ - 'select toInt64(number) A, toString(number) S from numbers(100000000) format TSV' > t.tsv + 'SELECT toInt64(number) A, toString(number) S FROM numbers(100000000) FORMAT TSV' > t.tsv ``` ### Insert with default settings (not atomic) ```bash -drop table if exists trg; -create table trg(A Int64, S String) Engine=MergeTree order by A; +DROP TABLE IF EXISTS trg; +CREATE TABLE trg(A Int64, S String) Engine=MergeTree ORDER BY A; -- Load data in Native format -clickhouse-client -q 'insert into trg format Native' (y <= ts), state_arr, ts_arr)) AS uniq + toStartOfDay(ts) AS ts, + uniqExactMerge(uniqExactState(user_id)) OVER (ORDER BY ts ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS uniq +FROM events +GROUP BY ts +ORDER BY ts ASC + +┌──────────────────ts─┬─uniq─┐ +│ 2021-04-29 00:00:00 │ 2 │ +│ 2021-04-30 00:00:00 │ 3 │ +│ 2021-05-01 00:00:00 │ 4 │ +│ 2021-05-02 00:00:00 │ 5 │ +│ 2021-05-03 00:00:00 │ 7 │ +└─────────────────────┴──────┘ + +SELECT + ts, + uniqExactMerge(state) OVER (ORDER BY ts ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS uniq FROM ( SELECT - toStartOfDay(ts) AS _ts, + toStartOfDay(ts) AS ts, uniqExactState(user_id) AS state FROM events - GROUP BY _ts + GROUP BY ts ) ORDER BY ts ASC @@ -46,11 +58,17 @@ ORDER BY ts ASC │ 2021-05-02 00:00:00 │ 5 │ │ 2021-05-03 00:00:00 │ 7 │ └─────────────────────┴──────┘ +``` -WITH arrayJoin(range(toUInt32(_ts) AS int, least(int + toUInt32((3600 * 24) * 5), toUInt32(toDateTime('2021-05-04 00:00:00'))), 3600 * 24)) AS ts_expanded +## Using arrays + +```sql +WITH + groupArray(_ts) AS ts_arr, + groupArray(state) AS state_arr SELECT - toDateTime(ts_expanded) AS ts, - uniqExactMerge(state) AS uniq + arrayJoin(ts_arr) AS ts, + arrayReduce('uniqExactMerge', 
arrayFilter((x, y) -> (y <= ts), state_arr, ts_arr)) AS uniq FROM ( SELECT @@ -59,7 +77,6 @@ FROM FROM events GROUP BY _ts ) -GROUP BY ts ORDER BY ts ASC ┌──────────────────ts─┬─uniq─┐ @@ -69,22 +86,20 @@ ORDER BY ts ASC │ 2021-05-02 00:00:00 │ 5 │ │ 2021-05-03 00:00:00 │ 7 │ └─────────────────────┴──────┘ -``` -## Using window functions (starting from Clickhouse 21.3) - -```sql +WITH arrayJoin(range(toUInt32(_ts) AS int, least(int + toUInt32((3600 * 24) * 5), toUInt32(toDateTime('2021-05-04 00:00:00'))), 3600 * 24)) AS ts_expanded SELECT - ts, - uniqExactMerge(state) OVER (ORDER BY ts ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS uniq + toDateTime(ts_expanded) AS ts, + uniqExactMerge(state) AS uniq FROM ( SELECT - toStartOfDay(ts) AS ts, + toStartOfDay(ts) AS _ts, uniqExactState(user_id) AS state FROM events - GROUP BY ts + GROUP BY _ts ) +GROUP BY ts ORDER BY ts ASC ┌──────────────────ts─┬─uniq─┐ diff --git a/content/en/altinity-kb-queries-and-syntax/data-types-on-disk-and-in-ram.md b/content/en/altinity-kb-queries-and-syntax/data-types-on-disk-and-in-ram.md index 2150d339f9..daa99a3301 100644 --- a/content/en/altinity-kb-queries-and-syntax/data-types-on-disk-and-in-ram.md +++ b/content/en/altinity-kb-queries-and-syntax/data-types-on-disk-and-in-ram.md @@ -39,4 +39,4 @@ description: > -See also [https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup41/data_processing.pdf](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup41/data_processing.pdf) (slide 17-22) +See also the presentation [Data processing into ClickHouse®](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup41/data_processing.pdf), especially slides 17-22. 
diff --git a/content/en/altinity-kb-queries-and-syntax/datetime64.md b/content/en/altinity-kb-queries-and-syntax/datetime64.md index fae72c9f16..601cf478d6 100644 --- a/content/en/altinity-kb-queries-and-syntax/datetime64.md +++ b/content/en/altinity-kb-queries-and-syntax/datetime64.md @@ -2,11 +2,9 @@ title: "DateTime64" linkTitle: "DateTime64" weight: 100 -description: >- - DateTime64 data type --- -## Substract fractional seconds +## Subtract fractional seconds ```sql WITH toDateTime64('2021-09-07 13:41:50.926', 3) AS time diff --git a/content/en/altinity-kb-queries-and-syntax/delete-via-tombstone-column.md b/content/en/altinity-kb-queries-and-syntax/delete-via-tombstone-column.md index 32b7292d9f..61ccb70496 100644 --- a/content/en/altinity-kb-queries-and-syntax/delete-via-tombstone-column.md +++ b/content/en/altinity-kb-queries-and-syntax/delete-via-tombstone-column.md @@ -4,6 +4,12 @@ linkTitle: "DELETE via tombstone column" description: > DELETE via tombstone column --- + +This article provides an overview of the different methods to handle row deletion in ClickHouse, using tombstone columns and ALTER UPDATE or DELETE. The goal is to highlight the performance impacts of different techniques and storage settings, including a scenario using S3 for remote storage. + +1. Creating a Test Table +We will start by creating a simple MergeTree table with a tombstone column (is_active) to track active rows: + ```sql CREATE TABLE test_delete ( @@ -16,7 +22,10 @@ CREATE TABLE test_delete ) ENGINE = MergeTree ORDER BY key; - +``` +2. 
Inserting Data +Insert sample data into the table: +```sql INSERT INTO test_delete (key, ts, value_a, value_b, value_c) SELECT number, 1, @@ -25,8 +34,12 @@ INSERT INTO test_delete (key, ts, value_a, value_b, value_c) SELECT concat('string', toString(number)) FROM numbers(10000000); -INSERT INTO test_delete (key, ts, value_a, value_b, value_c) VALUES (400000, 2, 'totally different string', 'another totally different string', 'last string'); +INSERT INTO test_delete (key, ts, value_a, value_b, value_c) VALUES (400000, 2, 'totally different string', 'another totally different string', 'last string'); +``` +3. Querying the Data +To verify the inserted data: +```sql SELECT * FROM test_delete WHERE key = 400000; @@ -37,31 +50,49 @@ WHERE key = 400000; ┌────key─┬─ts─┬─value_a──────────────────┬─value_b────────────────┬─value_c──────┬─is_active─┐ │ 400000 │ 1 │ some_looong_string400000 │ another_long_str400000 │ string400000 │ 1 │ └────────┴────┴──────────────────────────┴────────────────────────┴──────────────┴───────────┘ +``` +This should return two rows with different ts values. + +4. Soft Deletion Using ALTER UPDATE +Instead of deleting a row, you can mark it as inactive by setting is_active to 0: +```sql SET mutations_sync = 2; ALTER TABLE test_delete UPDATE is_active = 0 WHERE (key = 400000) AND (ts = 1); - Ok. 0 rows in set. Elapsed: 0.058 sec. 
- +``` +After updating, you can filter out inactive rows: +```sql SELECT * FROM test_delete -WHERE (key = 400000) AND is_active; - -┌────key─┬─ts─┬─value_a──────────────────┬─value_b──────────────────────────┬─value_c─────┬─is_active─┐ -│ 400000 │ 2 │ totally different string │ another totally different string │ last string │ 1 │ -└────────┴────┴──────────────────────────┴──────────────────────────────────┴─────────────┴───────────┘ +WHERE (key = 400000) AND is_active=0; +┌────key─┬─ts─┬─value_a──────────────────┬─value_b────────────────┬─value_c──────┬─is_active─┐ +│ 400000 │ 1 │ some_looong_string400000 │ another_long_str400000 │ string400000 │ 0 │ +└────────┴────┴──────────────────────────┴────────────────────────┴──────────────┴───────────┘ +``` +5. Hard Deletion Using ALTER DELETE +If you need to completely remove a row from the table, you can use ALTER DELETE: +```sql ALTER TABLE test_delete DELETE WHERE (key = 400000) AND (ts = 1); Ok. 0 rows in set. Elapsed: 1.101 sec. -- 20 times slower!!! +``` +However, this operation is significantly slower compared to the ALTER UPDATE approach. For example: + +ALTER DELETE: Takes around 1.1 seconds +ALTER UPDATE: Only 0.05 seconds +The reason for this difference is that DELETE modifies the physical data structure, while UPDATE merely changes a column value. 
+ +```sql SELECT * FROM test_delete WHERE key = 400000; @@ -70,7 +101,7 @@ WHERE key = 400000; │ 400000 │ 2 │ totally different string │ another totally different string │ last string │ 1 │ └────────┴────┴──────────────────────────┴──────────────────────────────────┴─────────────┴───────────┘ --- For ReplacingMergeTree +-- For ReplacingMergeTree -> https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replacingmergetree OPTIMIZE TABLE test_delete FINAL; @@ -86,3 +117,94 @@ WHERE key = 400000 │ 400000 │ 2 │ totally different string │ another totally different string │ last string │ 1 │ └────────┴────┴──────────────────────────┴──────────────────────────────────┴─────────────┴───────────┘ ``` + +Soft Deletion (via ALTER UPDATE): A quicker approach that does not involve physical data deletion but rather updates the tombstone column. +Hard Deletion (via ALTER DELETE): Can take significantly longer, especially with large datasets stored in remote storage like S3. + +6. Optimizing for Faster Deletion with S3 Storage +If using S3 for storage, the DELETE operation becomes even slower due to the overhead of handling remote data. Here’s an example with a table using S3-backed storage: + +```sql +CREATE TABLE test_delete +( + `key` UInt32, + `value_a` String, + `value_b` String, + `value_c` String, + `is_deleted` UInt8 DEFAULT 0 +) +ENGINE = MergeTree +ORDER BY key +SETTINGS storage_policy = 's3tiered'; + +INSERT INTO test_delete (key, value_a, value_b, value_c) SELECT + number, + concat('some_looong_string', toString(number)), + concat('another_long_str', toString(number)), + concat('really long string', toString(arrayMap(i -> cityHash64(i*number), range(50)))) +FROM numbers(10000000); + +OPTIMIZE TABLE test_delete FINAL; + +ALTER TABLE test_delete MOVE PARTITION tuple() TO DISK 's3disk'; + +SELECT count() FROM test_delete; +┌──count()─┐ +│ 10000000 │ +└──────────┘ +1 row in set. Elapsed: 0.002 sec. +``` + +7. 
DELETE Using ALTER UPDATE and Row Policy +You can also control visibility at the query level using row policies. For example, to only show rows where is_active = 1: + +To delete a row using ALTER UPDATE: + +```sql +CREATE ROW POLICY pol1 ON test_delete USING is_active=1 TO all; + +SELECT count() FROM test_delete; -- select count() became much slower, it reads data now, not metadata +┌──count()─┐ +│ 10000000 │ +└──────────┘ +1 row in set. Elapsed: 0.314 sec. Processed 10.00 million rows, 10.00 MB (31.84 million rows/s., 31.84 MB/s.) + +ALTER TABLE test_delete UPDATE is_active = 0 WHERE (key = 400000) settings mutations_sync = 2; +0 rows in set. Elapsed: 1.256 sec. + +SELECT count() FROM test_delete; +┌─count()─┐ +│ 9999999 │ +└─────────┘ +``` +This impacts the performance of queries like SELECT count(), as ClickHouse now needs to scan data instead of reading metadata. + +8. DELETE Using ALTER DELETE - https://clickhouse.com/docs/en/sql-reference/statements/alter/delete +To delete a row using ALTER DELETE: + +```sql +ALTER TABLE test_delete DELETE WHERE (key = 400001) settings mutations_sync = 2; +0 rows in set. Elapsed: 955.672 sec. + +SELECT count() FROM test_delete; +┌─count()─┐ +│ 9999998 │ +└─────────┘ +``` +This operation may take significantly longer compared to soft deletions (around 955 seconds in this example for large datasets): + +9. DELETE Using DELETE Statement - https://clickhouse.com/docs/en/sql-reference/statements/delete +The DELETE statement can also be used to remove data from a table: + +```sql +DELETE FROM test_delete WHERE (key = 400002); +0 rows in set. Elapsed: 1.281 sec. + +SELECT count() FROM test_delete; +┌─count()─┐ +│ 9999997 │ +└─────────┘ +``` +This operation is faster, with an elapsed time of around 1.28 seconds in this case: + +The choice between ALTER UPDATE and ALTER DELETE depends on your use case. For soft deletes, updating a tombstone column is significantly faster and easier to manage. 
However, if you need to physically remove rows, be mindful of the performance costs, especially with remote storage like S3. diff --git a/content/en/altinity-kb-queries-and-syntax/distinct-vs-group-by-vs-limit-by.md b/content/en/altinity-kb-queries-and-syntax/distinct-vs-group-by-vs-limit-by.md index 2ec384e2a9..20779629b2 100644 --- a/content/en/altinity-kb-queries-and-syntax/distinct-vs-group-by-vs-limit-by.md +++ b/content/en/altinity-kb-queries-and-syntax/distinct-vs-group-by-vs-limit-by.md @@ -2,8 +2,6 @@ title: "DISTINCT & GROUP BY & LIMIT 1 BY what the difference" linkTitle: "DISTINCT & GROUP BY & LIMIT 1 BY what the difference" weight: 100 -description: >- - Page description for heading and indexes. --- ## DISTINCT @@ -94,7 +92,7 @@ MemoryTracker: Peak memory usage (for query): 4.05 GiB. 0 rows in set. Elapsed: 4.852 sec. Processed 100.00 million rows, 800.00 MB (20.61 million rows/s., 164.88 MB/s.) -This query faster than first, because ClickHouse doesn't need to merge states for all keys, only for first 1000 (based on LIMIT) +This query faster than first, because ClickHouse® doesn't need to merge states for all keys, only for first 1000 (based on LIMIT) SELECT number % 1000 AS key @@ -119,7 +117,7 @@ MemoryTracker: Peak memory usage (for query): 3.77 MiB. ``` * Multi threaded -* Will return result only after competion of aggregation +* Will return result only after completion of aggregation ## LIMIT BY diff --git a/content/en/altinity-kb-queries-and-syntax/explain-query.md b/content/en/altinity-kb-queries-and-syntax/explain-query.md index 9517af142b..685453e570 100644 --- a/content/en/altinity-kb-queries-and-syntax/explain-query.md +++ b/content/en/altinity-kb-queries-and-syntax/explain-query.md @@ -10,10 +10,12 @@ description: > ```sql EXPLAIN AST SYNTAX - PLAN header = 0, + PLAN indexes = 0, + header = 0, description = 1, actions = 0, optimize = 1 + json = 0 PIPELINE header = 0, graph = 0, compact = 1 @@ -25,7 +27,9 @@ SELECT ... 
* `SYNTAX` - query text after AST-level optimizations * `PLAN` - query execution plan * `PIPELINE` - query execution pipeline -* `ESTIMATE` - https://github.com/ClickHouse/ClickHouse/pull/26131 (since 21.9) +* `ESTIMATE` - See [Estimates for select query](https://github.com/ClickHouse/ClickHouse/pull/26131), available since ClickHouse® 21.9 +* `indexes=1` supported starting from 21.6 (https://github.com/ClickHouse/ClickHouse/pull/22352 ) +* `json=1` supported starting from 21.6 (https://github.com/ClickHouse/ClickHouse/pull/23082) References diff --git a/content/en/altinity-kb-queries-and-syntax/group-by/_index.md b/content/en/altinity-kb-queries-and-syntax/group-by/_index.md index e7866bc3a9..5a79605fad 100644 --- a/content/en/altinity-kb-queries-and-syntax/group-by/_index.md +++ b/content/en/altinity-kb-queries-and-syntax/group-by/_index.md @@ -6,7 +6,7 @@ keywords: - clickhouse group by - clickhouse memory description: > - Learn about GROUP BY clause in ClickHouse. + Learn about the GROUP BY clause in ClickHouse® weight: 1 --- @@ -14,7 +14,7 @@ weight: 1 [Code](https://github.com/ClickHouse/ClickHouse/blob/8ab5270ded39c8b044f60f73c1de00c8117ab8f2/src/Interpreters/Aggregator.cpp#L382) -ClickHouse uses non-blocking? hash tables, so each thread has at least one hash table. +ClickHouse® uses non-blocking? hash tables, so each thread has at least one hash table. It makes easier to not care about sync between multiple threads, but has such disadvantages as: 1. Bigger memory usage. @@ -52,7 +52,7 @@ https://clickhouse.com/docs/en/sql-reference/statements/select/group-by/#select- ## optimize_aggregation_in_order GROUP BY -Usually it works slower than regular GROUP BY, because ClickHouse need's to read and process data in specific ORDER, which makes it much more complicated to parallelize reading and aggregating. 
+Usually it works slower than regular GROUP BY, because ClickHouse needs to read and process data in specific ORDER, which makes it much more complicated to parallelize reading and aggregating. But it use much less memory, because ClickHouse can stream resultset and there is no need to keep it in memory. @@ -143,7 +143,7 @@ Size of keys participated in GROUP BY 2. States of aggregation functions: -Be careful with function, which state can use unrestricted amount of memory and grow indefenetely: +Be careful with function, which state can use unrestricted amount of memory and grow indefinitely: - groupArray (groupArray(1000)()) - uniqExact (uniq,uniqCombined) diff --git a/content/en/altinity-kb-queries-and-syntax/group-by/tricks.md b/content/en/altinity-kb-queries-and-syntax/group-by/tricks.md index 1483073b7d..4595d6a279 100644 --- a/content/en/altinity-kb-queries-and-syntax/group-by/tricks.md +++ b/content/en/altinity-kb-queries-and-syntax/group-by/tricks.md @@ -54,7 +54,7 @@ FROM numbers_mt(1000000000) -All queries and datasets are unique, so in different situations different hacks could work better or worsen. +All queries and datasets are unique, so in different situations different hacks could work better or worse. ### PreFilter values before GROUP BY @@ -90,11 +90,11 @@ FORMAT `Null` ### Use Fixed-width data types instead of String -EG you have 2 strings which has values in special form like this +For example, you have 2 strings which has values in special form like this 'ABX 1412312312313' -You can just remove 4 first characters and convert rest of them to UInt64 +You can just remove the first 4 characters and convert the rest to UInt64 toUInt64(substr('ABX 1412312312313',5)) @@ -193,7 +193,7 @@ Elapsed: 6.247 sec. Processed 1.00 billion rows, 27.00 GB (160.09 million rows/s ``` -It can be especially useful when you tries to do GROUP BY lc_column_1, lc_column_2 and ClickHouse falls back to serialized algorytm. 
+It can be especially useful when you tries to do GROUP BY lc_column_1, lc_column_2 and ClickHouse® falls back to serialized algorithm. ### Two LowCardinality Columns in GROUP BY @@ -281,9 +281,9 @@ Elapsed: 2.910 sec. Processed 1.00 billion rows, 27.00 GB (343.64 million rows/s ``` ### Shard your data by one of common high cardinal GROUP BY key -So on each shard you will have 1/N of all unique combination and this will result in smaller hash table. +So on each shard you will have 1/N of all unique combination and this will result in smaller hash tables. -Lets create 2 distributed tables with different distribution: rand() and by user_id +Let's create 2 distributed tables with different distribution: rand() and by user_id ```sql CREATE TABLE sessions_distributed AS sessions @@ -728,7 +728,7 @@ MemoryTracker: Peak memory usage (for query): 14.55 GiB. ### Reduce number of threads -Because each thread use independent hash table, if you lower thread amount it will reduce number of hash tables as well and lower memory usage at the cost of slower query execution. +Because each thread uses an independent hash table, if you lower thread amount it will reduce number of hash tables as well and lower memory usage at the cost of slower query execution. ```sql @@ -1093,7 +1093,7 @@ https://github.com/ClickHouse/ClickHouse/pull/33439 ### GROUP BY in external memory -Slow +Slow! 
### Use hash function for GROUP BY keys diff --git a/content/en/altinity-kb-queries-and-syntax/joins/_index.md b/content/en/altinity-kb-queries-and-syntax/joins/_index.md index 7868e67f86..6f6267594a 100644 --- a/content/en/altinity-kb-queries-and-syntax/joins/_index.md +++ b/content/en/altinity-kb-queries-and-syntax/joins/_index.md @@ -3,9 +3,70 @@ title: "JOINs" linkTitle: "JOINs" description: > JOINs +aliases: + - /altinity-kb-queries-and-syntax/joins/join-table-engine/ --- -See presentation: +Resources: -[https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/join.pdf](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/join.pdf) +* [Overview of JOINs (Russian)](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/join.pdf) - Presentation from Meetup 38 in 2019 +* [Notes on JOIN options](https://excalidraw.com/#json=xX_heZcCu0whsDmC2Mdvo,ppbUVFpPz-flJu5ZDnwIPw) -https://excalidraw.com/#json=xX_heZcCu0whsDmC2Mdvo,ppbUVFpPz-flJu5ZDnwIPw +## Join Table Engine + +The main purpose of JOIN table engine is to avoid building the right table for joining on each query execution. So it's usually used when you have a high amount of fast queries which share the same right table for joining. + +### Updates + +It's possible to update rows with setting `join_any_take_last_row` enabled. + +```sql +CREATE TABLE id_val_join +( + `id` UInt32, + `val` UInt8 +) +ENGINE = Join(ANY, LEFT, id) +SETTINGS join_any_take_last_row = 1 + +Ok. + +INSERT INTO id_val_join VALUES (1,21)(1,22)(3,23); + +Ok. + +SELECT * +FROM +( + SELECT toUInt32(number) AS id + FROM numbers(4) +) AS n +ANY LEFT JOIN id_val_join USING (id) + +┌─id─┬─val─┐ +│ 0 │ 0 │ +│ 1 │ 22 │ +│ 2 │ 0 │ +│ 3 │ 23 │ +└────┴─────┘ + +INSERT INTO id_val_join VALUES (1,40)(2,24); + +Ok. 
+ +SELECT * +FROM +( + SELECT toUInt32(number) AS id + FROM numbers(4) +) AS n +ANY LEFT JOIN id_val_join USING (id) + +┌─id─┬─val─┐ +│ 0 │ 0 │ +│ 1 │ 40 │ +│ 2 │ 24 │ +│ 3 │ 23 │ +└────┴─────┘ +``` + +[Join table engine documentation](https://clickhouse.com/docs/en/engines/table-engines/special/join/) diff --git a/content/en/altinity-kb-queries-and-syntax/joins/join-table-engine.md b/content/en/altinity-kb-queries-and-syntax/joins/join-table-engine.md index 86a4453fad..1b0a6fb757 100644 --- a/content/en/altinity-kb-queries-and-syntax/joins/join-table-engine.md +++ b/content/en/altinity-kb-queries-and-syntax/joins/join-table-engine.md @@ -3,6 +3,7 @@ title: "JOIN table engine" linkTitle: "JOIN table engine" description: > JOIN table engine +draft: true --- The main purpose of JOIN table engine is to avoid building the right table for joining on each query execution. So it's usually used when you have a high amount of fast queries which share the same right table for joining. @@ -60,4 +61,4 @@ ANY LEFT JOIN id_val_join USING (id) └────┴─────┘ ``` -[https://clickhouse.tech/docs/en/engines/table-engines/special/join/](https://clickhouse.tech/docs/en/engines/table-engines/special/join/) +[Join table engine documentation](https://clickhouse.com/docs/en/engines/table-engines/special/join/) diff --git a/content/en/altinity-kb-queries-and-syntax/joins/joins-tricks.md b/content/en/altinity-kb-queries-and-syntax/joins/joins-tricks.md new file mode 100644 index 0000000000..26468cee0b --- /dev/null +++ b/content/en/altinity-kb-queries-and-syntax/joins/joins-tricks.md @@ -0,0 +1,400 @@ +--- +title: "JOIN optimization tricks" +linkTitle: "JOIN optimization tricks" +--- + +All tests below were done with default `hash` join. ClickHouse joins are evolving rapidly and behavior varies with other join types. + +# Data + +For our exercise, we will use two tables from the well-known TPC-DS benchmark: store_sales and customer.
Table sizes are the following: + +store_sales = 2 billion rows +customer = 12 million rows + +So there are 200 rows in store_sales table per customer on average. Also 90% of customers made 1-10 purchases. + +Schema example: + +```sql +CREATE TABLE store_sales +( + `ss_sold_time_sk` DateTime, + `ss_sold_date_sk` Date, + `ss_ship_date_sk` Date, + `ss_item_sk` UInt32, + `ss_customer_sk` UInt32, + `ss_cdemo_sk` UInt32, + `ss_hdemo_sk` UInt32, + `ss_addr_sk` UInt32, + `ss_store_sk` UInt32, + `ss_promo_sk` UInt32, + `ss_ticket_number` UInt32, + `ss_quantity` UInt32, + `ss_wholesale_cost` Float64, + `ss_list_price` Float64, + `ss_sales_price` Float64, + `ss_ext_discount_amt` Float64, + `ss_ext_sales_price` Float64, + `ss_ext_wholesale_cost` Float64, + `ss_ext_list_price` Float64, + `ss_ext_tax` Float64, + `ss_coupon_amt` Float64, + `ss_net_paid` Float64, + `ss_net_paid_inc_tax` Float64, + `ss_net_profit` Float64 +) +ENGINE = MergeTree +ORDER BY ss_ticket_number + +CREATE TABLE customer +( + `c_customer_sk` UInt32, + `c_current_addr_sk` UInt32, + `c_first_shipto_date_sk` Date, + `c_first_sales_date_sk` Date, + `c_salutation` String, + `c_first_name` String, + `c_last_name` String, + `c_preferred_cust_flag` String, + `c_birth_date` Date, + `c_birth_country` String, + `c_login` String, + `c_email_address` String, + `c_last_review_date` Date +) +ENGINE = MergeTree +ORDER BY c_customer_sk +``` + +# Target query + +```sql +SELECT + sumIf(ss_sales_price, customer.c_first_name = 'James') AS sum_James, + sumIf(ss_sales_price, customer.c_first_name = 'Lisa') AS sum_Lisa, + sum(ss_sales_price) AS sum_total +FROM store_sales +INNER JOIN customer ON store_sales.ss_customer_sk = customer.c_customer_sk +``` + +## Baseline performance + +```sql +SELECT + sumIf(ss_sales_price, customer.c_first_name = 'James') AS sum_James, + sumIf(ss_sales_price, customer.c_first_name = 'Lisa') AS sum_Lisa, + sum(ss_sales_price) AS sum_total +FROM store_sales +INNER JOIN customer ON
store_sales.ss_customer_sk = customer.c_customer_sk + +0 rows in set. Elapsed: 188.384 sec. Processed 2.89 billion rows, 40.60 GB (15.37 million rows/s., 216.92 MB/s.) +``` + +## Manual pushdown of conditions + +If we look at our query, we only care if sale belongs to customer named `James` or `Lisa` and dont care for rest of cases. We can use that. + +Usually, ClickHouse is able to pushdown conditions, but not in that case, when conditions itself part of function expression, so you can manually help in those cases. + +```sql +SELECT + sumIf(ss_sales_price, customer.c_first_name = 'James') as sum_James, + sumIf(ss_sales_price, customer.c_first_name = 'Lisa') as sum_Lisa, + sum(ss_sales_price) as sum_total +FROM store_sales LEFT JOIN (SELECT * FROM customer WHERE c_first_name = 'James' OR c_first_name = 'Lisa') as customer ON store_sales.ss_customer_sk = customer.c_customer_sk + +1 row in set. Elapsed: 35.370 sec. Processed 2.89 billion rows, 40.60 GB (81.76 million rows/s., 1.15 GB/s.) +``` + + +## Reduce right table row size + +### Reduce attribute columns (push expression before JOIN step) + +Our row from the right table consists of 2 fields: customer_sk and c_first_name. +First one is needed to JOIN by it, so it's not much we can do here, but we can transform a bit of the second column. + +Again, let's look in how we use this column in main query: + +customer.c_first_name = 'James' +customer.c_first_name = 'Lisa' + +We calculate 2 simple conditions(which don't have any dependency on data from the left table) and nothing more. +It does mean that we can move this calculation to the right table, it will make 3 improvements! + +1. Right table will be smaller -> smaller RAM usage -> better cache hits +2. We will calculate our conditions over a smaller data set. In the right table we have only 10 million rows and after joining because of the left table we have 2 billion rows -> 200 times improvement! +3. 
Our resulting table after JOIN will not have an expensive String column, only 1 byte UInt8 instead -> less copy of data in memory. + +Let's do it: + +There are several ways to rewrite that query, let's not bother with simple ones and go straight to most optimized: + +Put our 2 conditions in hand-made bitmask: + +In order to do that we will take our conditions and multiply them by + +``` +(c_first_name = 'James') + (2 * (c_first_name = 'Lisa')) + +C_first_name | (c_first_name = 'James') + (2 * (c_first_name = 'Lisa')) + James | 00000001 + Lisa | 00000010 +``` + +As you can see, if you do it in that way, your conditions will not interfere with each other! +But we need to be careful with the wideness of the resulting numeric type. +Let's write our calculations in type notation: +`UInt8 + UInt8*2 -> UInt8 + UInt16 -> UInt32` + +But we actually do not use more than first 2 bits, so we need to cast this expression back to UInt8 + +Last thing to do is use the bitTest function in order to get the result of our condition by its position. + +And resulting query is: + +```sql +SELECT + sumIf(ss_sales_price, bitTest(customer.cond, 0)) AS sum_James, + sumIf(ss_sales_price, bitTest(customer.cond, 1)) AS sum_Lisa, + sum(ss_sales_price) AS sum_total +FROM store_sales +LEFT JOIN +( + SELECT + c_customer_sk, + ((c_first_name = 'James') + (2 * (c_first_name = 'Lisa')))::UInt8 AS cond FROM customer + WHERE (c_first_name = 'James') OR (c_first_name = 'Lisa') +) AS customer ON store_sales.ss_customer_sk = customer.c_customer_sk + +1 row in set. Elapsed: 31.699 sec. Processed 2.89 billion rows, 40.60 GB (91.23 million rows/s., 1.28 GB/s.) +``` + +### Reduce key column size + +But can we make something with our JOIN key column? + +Its type is Nullable(UInt64) + +Let's check if we really need to have a 0…18446744073709551615 range for our customer id, it sure looks like that we have far fewer people on earth than this number.
The same goes for the Nullable trait: we don’t care about Nulls in customer_id + +SELECT max(c_customer_sk) FROM customer + +For sure, we don't need that wide type. +Let's remove the Nullable trait and cast the column to UInt32, twice as small in byte size compared to UInt64. + +```sql +SELECT + sumIf(ss_sales_price, bitTest(customer.cond, 0)) AS sum_James, + sumIf(ss_sales_price, bitTest(customer.cond, 1)) AS sum_Lisa, + sum(ss_sales_price) AS sum_total +FROM store_sales +LEFT JOIN +( + SELECT + CAST(c_customer_sk, 'UInt32') AS c_customer_sk, + (c_first_name = 'James') + (2 * (c_first_name = 'Lisa')) AS cond + FROM customer + WHERE (c_first_name = 'James') OR (c_first_name = 'Lisa') +) AS customer ON store_sales.ss_customer_sk_nn = customer.c_customer_sk + +1 row in set. Elapsed: 27.093 sec. Processed 2.89 billion rows, 26.20 GB (106.74 million rows/s., 967.16 MB/s.) +``` + +Another 10% perf improvement from using a UInt32 key instead of Nullable(UInt64) +Looks pretty neat, we almost got a 10 times improvement over our initial query. +Can we do better? + +Probably, but it does mean that we need to get rid of the JOIN. + +## Use IN clause instead of JOIN + +Although all DBMSs support a roughly similar feature set, the performance of a given feature differs between databases: + +A small example: for PostgreSQL, it is recommended to replace big IN clauses with JOINs, because IN clauses have bad performance. +But for ClickHouse it's the opposite: IN works faster than JOIN, because it only checks key existence in a HashSet and doesn't need to extract any data from the right table. + +Let's test that: + +```sql +SELECT + sumIf(ss_sales_price, ss_customer_sk IN ( + SELECT c_customer_sk + FROM customer + WHERE c_first_name = 'James' + )) AS sum_James, + sumIf(ss_sales_price, ss_customer_sk IN ( + SELECT c_customer_sk + FROM customer + WHERE c_first_name = 'Lisa' + )) AS sum_Lisa, + sum(ss_sales_price) AS sum_total +FROM store_sales + +1 row in set. Elapsed: 16.546 sec.
Processed 2.90 billion rows, 40.89 GB (175.52 million rows/s., 2.47 GB/s.) +``` + +Almost 2 times faster than our previous record with JOIN, what if we will improve the same hint with c_customer_sk key like in JOIN? + +```sql +SELECT + sumIf(ss_sales_price, ss_customer_sk_nn IN ( + SELECT c_customer_sk::UInt32 + FROM customer + WHERE c_first_name = 'James' + )) AS sum_James, + sumIf(ss_sales_price, ss_customer_sk_nn IN ( + SELECT c_customer_sk::UInt32 + FROM customer + WHERE c_first_name = 'Lisa' + )) AS sum_Lisa, + sum(ss_sales_price) AS sum_total +FROM store_sales + +1 row in set. Elapsed: 12.355 sec. Processed 2.90 billion rows, 26.49 GB (235.06 million rows/s., 2.14 GB/s.) +``` + +Another 25% performance! + +But, there is one big limitation with IN approach, what if we have more than just 2 conditions? + +```sql +SELECT + sumIf(ss_sales_price, ss_customer_sk_nn IN ( + SELECT c_customer_sk::UInt32 + FROM customer + WHERE c_first_name = 'James' + )) AS sum_James, + sumIf(ss_sales_price, ss_customer_sk_nn IN ( + SELECT c_customer_sk::UInt32 + FROM customer + WHERE c_first_name = 'Lisa' + )) AS sum_Lisa, + sumIf(ss_sales_price, ss_customer_sk_nn IN ( + SELECT c_customer_sk::UInt32 + FROM customer + WHERE c_last_name = 'Smith' + )) AS sum_Smith, + sumIf(ss_sales_price, ss_customer_sk_nn IN ( + SELECT c_customer_sk::UInt32 + FROM customer + WHERE c_last_name = 'Williams' + )) AS sum_Williams, + sum(ss_sales_price) AS sum_total +FROM store_sales + +1 row in set. Elapsed: 23.690 sec. Processed 2.93 billion rows, 27.06 GB (123.60 million rows/s., 1.14 GB/s.) +``` + +## Adhoc alternative to Dictionary with FLAT layout + +But first is a short introduction. What the hell is a Dictionary with a FLAT layout? 
+ +Basically, it's just a set of Arrays for each attribute where the value position in the attribute array is just the dictionary key +For sure it puts a heavy limitation on what the dictionary key can be, but it gives really good advantages: + +`['Alice','James', 'Robert','John', ...].length = 12mil, Memory usage ~ N*sum(sizeOf(String(N)) + 1)` + +It's really small memory usage (good cache hit rate) & really fast key lookups (no complex hash calculation) + +So, if it's that great, what are the caveats? +The first one is that your keys should ideally be autoincremental (with a small number of gaps) +And for the second, let's look at this simple query and write down all the calculations: + +```sql +SELECT sumIf(ss_sales_price, dictGet(...) = 'James') +``` + +1. Dictionary call (2 billion times) +2. String equality check (2 billion times) + +Although it's really efficient in terms of the dictGet call and memory usage by the Dictionary, it still materializes the String column (memcpy) and we pay the penalty of executing the condition on top of such a string column for each row. + +But what if we could first calculate our required condition and create such a "Dictionary" ad hoc at query time? + +And we can actually do that! +But let's repeat our analysis again: + +```sql +SELECT sumIf(ss_sales_price, here_lives_unicorns(dictGet(...) = 'James')) +``` + +`['Alice','James', 'Lisa','James', ...].map(x -> multiIf(x = 'James', 1, x = 'Lisa', 2, 0)) => [0,1,2,1,...].length` = 12mil, Memory usage ~ `N*sizeOf(UInt8)` <- It's even smaller than a FLAT dictionary + +And actions: + +1. String equality check (12 million times) +2. Create Array (12 million elements) +3. Array call (2 billion times) +4. UInt8 equality check (2 billion times) + +But what is the `here_lives_unicorns` function — does it exist in ClickHouse?
+ +No, but we can hack it with some array manipulation: + +```sql +SELECT sumIf(ss_sales_price, arr[customer_id] = 2) + +WITH ( + SELECT groupArray(assumeNotNull((c_first_name = 'James') + (2 * (c_first_name = 'Lisa')))::UInt8) + FROM + ( + SELECT * + FROM customer + ORDER BY c_customer_sk ASC + ) + ) AS cond +SELECT + sumIf(ss_sales_price, bitTest(cond[ss_customer_sk], 0)) AS sum_James, + sumIf(ss_sales_price, bitTest(cond[ss_customer_sk], 1)) AS sum_Lisa, + sum(ss_sales_price) AS sum_total +FROM store_sales + +1 row in set. Elapsed: 13.006 sec. Processed 2.89 billion rows, 40.60 GB (222.36 million rows/s., 3.12 GB/s.) + +WITH ( + SELECT groupArray(assumeNotNull((c_first_name = 'James') + (2 * (c_first_name = 'Lisa')))::UInt8) + FROM + ( + SELECT * + FROM customer + ORDER BY c_customer_sk ASC + ) + ) AS cond, + bitTest(cond[ss_customer_sk_nn], 0) AS cond_james, + bitTest(cond[ss_customer_sk_nn], 1) AS cond_lisa +SELECT + sumIf(ss_sales_price, cond_james) AS sum_James, + sumIf(ss_sales_price, cond_lisa) AS sum_Lisa, + sum(ss_sales_price) AS sum_total +FROM store_sales + + +1 row in set. Elapsed: 10.054 sec. Processed 2.89 billion rows, 26.20 GB (287.64 million rows/s., 2.61 GB/s.) +``` + +20% faster than the IN approach, what if we will have not 2 but 4 such conditions: + +```sql +WITH ( + SELECT groupArray(assumeNotNull((((c_first_name = 'James') + (2 * (c_first_name = 'Lisa'))) + (4 * (c_last_name = 'Smith'))) + (8 * (c_last_name = 'Williams')))::UInt8) + FROM + ( + SELECT * + FROM customer + ORDER BY c_customer_sk ASC + ) + ) AS cond +SELECT + sumIf(ss_sales_price, bitTest(cond[ss_customer_sk_nn], 0)) AS sum_James, + sumIf(ss_sales_price, bitTest(cond[ss_customer_sk_nn], 1)) AS sum_Lisa, + sumIf(ss_sales_price, bitTest(cond[ss_customer_sk_nn], 2)) AS sum_Smith, + sumIf(ss_sales_price, bitTest(cond[ss_customer_sk_nn], 3)) AS sum_Williams, + sum(ss_sales_price) AS sum_total +FROM store_sales + +1 row in set. Elapsed: 11.454 sec. 
Processed 2.89 billion rows, 26.39 GB (252.49 million rows/s., 2.30 GB/s.) +``` + +As we can see, the Array approach doesn't even notice that we increased the number of conditions by 2 times. diff --git a/content/en/altinity-kb-queries-and-syntax/jsonextract-to-parse-many-attributes-at-a-time.md b/content/en/altinity-kb-queries-and-syntax/jsonextract-to-parse-many-attributes-at-a-time.md index db5348511a..38fb071d87 100644 --- a/content/en/altinity-kb-queries-and-syntax/jsonextract-to-parse-many-attributes-at-a-time.md +++ b/content/en/altinity-kb-queries-and-syntax/jsonextract-to-parse-many-attributes-at-a-time.md @@ -4,6 +4,9 @@ linkTitle: "JSONExtract to parse many attributes at a time" description: > JSONExtract to parse many attributes at a time --- + +Don't use several JSONExtract calls for parsing big JSON. It's very inefficient, slow, and consumes CPU. Try to use one JSONExtract to parse the String to a Tuple and then get the needed elements: + ```sql WITH JSONExtract(json, 'Tuple(name String, id String, resources Nested(description String, format String, tracking_summary Tuple(total UInt32, recent UInt32)), extras Nested(key String, value String))') AS parsed_json SELECT @@ -15,3 +18,83 @@ SELECT tupleElement(tupleElement(tupleElement(parsed_json, 'resources'), 'tracking_summary'), 'recent') AS `resources.tracking_summary.recent` FROM url('https://raw.githubusercontent.com/jsonlines/guide/master/datagov100.json', 'JSONAsString', 'json String') ``` +However, such parsing requires a static schema - all keys should be present in every row, or you will get an empty structure.
More dynamic parsing requires several JSONExtract invocations, but still - try not to scan the same data several times: + +```sql +WITH + '{"timestamp":"2024-06-12T14:30:00.001Z","functionality":"DOCUMENT","flowId":"210abdee-6de5-474a-83da-748def0facc1","step":"BEGIN","env":"dev","successful":true,"data":{"action":"initiate_view","stats":{"total":1,"success":1,"failed":0},"client_ip":"192.168.1.100","client_port":"8080"}}' AS json, + JSONExtractKeysAndValues(json, 'String') AS m, + mapFromArrays(m.1, m.2) AS p +SELECT + extractKeyValuePairs(p['data'])['action'] AS data, + (p['successful']) = 'true' AS successful +FORMAT Vertical + +/* +Row 1: +────── +data: initiate_view +successful: 1 +*/ + +``` + +A good approach to get a proper schema from a json message is to let `clickhouse-local` schema inference do the job: + +```bash +$ ls example_message.json +example_message.json + +$ clickhouse-local --query="DESCRIBE file('example_message.json', 'JSONEachRow')" --format="Vertical"; + +Row 1: +────── +name: resourceLogs +type: Array(Tuple( + resource Nullable(String), + scopeLogs Array(Tuple( + logRecords Array(Tuple( + attributes Array(Tuple( + key Nullable(String), + value Tuple( + stringValue Nullable(String)))), + body Tuple( + stringValue Nullable(String)), + observedTimeUnixNano Nullable(String), + spanId Nullable(String), + traceId Nullable(String))), + scope Nullable(String))))) +``` + +For very subnested dynamic JSON files, if you don't need all the keys, you could parse sublevels specifically. 
Still, this will require several JSONExtract calls, but each call will have less data to parse, so the cost of each pass is reduced: + +```sql +CREATE TABLE better_parsing (json String) ENGINE = Memory; +INSERT INTO better_parsing FORMAT JSONAsString {"timestamp":"2024-06-12T14:30:00.001Z","functionality":"DOCUMENT","flowId":"210abdee-6de5-474a-83da-748def0facc1","step":"BEGIN","env":"dev","successful":true,"data":{"action":"initiate_view","stats":{"total":1,"success":1,"failed":0},"client_ip":"192.168.1.100","client_port":"8080"}} + +WITH parsed_content AS + ( + SELECT + JSONExtractKeysAndValues(json, 'String') AS first_level_arr, + mapFromArrays(first_level_arr.1, first_level_arr.2) AS first_level_map, + JSONExtractKeysAndValues(first_level_map['data'], 'String') AS second_level_arr, + mapFromArrays(second_level_arr.1, second_level_arr.2) AS second_level_map, + JSONExtractKeysAndValues(second_level_map['stats'], 'String') AS third_level_arr, + mapFromArrays(third_level_arr.1, third_level_arr.2) AS third_level_map + FROM better_parsing + ) +SELECT + first_level_map['timestamp'] AS timestamp, + second_level_map['action'] AS action, + third_level_map['total'] AS total, + third_level_map['nokey'] AS no_key_empty +FROM parsed_content + +/* + ┌─timestamp────────────────┬─action────────┬─total─┬─no_key_empty─┐ +1. │ 2024-06-12T14:30:00.001Z │ initiate_view │ 1 │ │ + └──────────────────────────┴───────────────┴───────┴──────────────┘ + +1 row in set. Elapsed: 0.003 sec.
+*/ +``` diff --git a/content/en/altinity-kb-queries-and-syntax/lag-lead.md b/content/en/altinity-kb-queries-and-syntax/lag-lead.md index 8db5c6f649..9ca10840ea 100644 --- a/content/en/altinity-kb-queries-and-syntax/lag-lead.md +++ b/content/en/altinity-kb-queries-and-syntax/lag-lead.md @@ -59,7 +59,7 @@ order by g, a; └───┴────────────┴────────────┴────────────┘ ``` -## Using window functions (starting from Clickhouse 21.3) +## Using window functions (starting from ClickHouse® 21.3) ```sql SET allow_experimental_window_functions = 1; diff --git a/content/en/altinity-kb-queries-and-syntax/literal-decimal-or-float.md b/content/en/altinity-kb-queries-and-syntax/literal-decimal-or-float.md index b2246cfe47..cb8cad57ec 100644 --- a/content/en/altinity-kb-queries-and-syntax/literal-decimal-or-float.md +++ b/content/en/altinity-kb-queries-and-syntax/literal-decimal-or-float.md @@ -20,6 +20,21 @@ SELECT └─────────────────┴─────────────┴────────────────────┴─────────────────────┘ ``` + +> When we try to type cast 64.32 to Decimal128(2) the resulted value is 64.31. + +When it sees a number with a decimal separator it interprets as `Float64` literal (where `64.32` have no accurate representation, and actually you get something like `64.319999999999999999`) and later that Float is casted to Decimal by removing the extra precision. 
+ +The workaround is very simple - wrap the number in quotes (and it will be considered as a string literal by the query parser, and will be transformed to Decimal directly), or use postgres-like casting syntax: + +```sql +select cast(64.32,'Decimal128(2)') a, cast('64.32','Decimal128(2)') b, 64.32::Decimal128(2) c; + +┌─────a─┬─────b─┬─────c─┐ +│ 64.31 │ 64.32 │ 64.32 │ +└───────┴───────┴───────┘ +``` + ## Float64 ```sql diff --git a/content/en/altinity-kb-queries-and-syntax/machine-learning-in-clickhouse.md b/content/en/altinity-kb-queries-and-syntax/machine-learning-in-clickhouse.md index 06b52f0340..0fafdb833e 100644 --- a/content/en/altinity-kb-queries-and-syntax/machine-learning-in-clickhouse.md +++ b/content/en/altinity-kb-queries-and-syntax/machine-learning-in-clickhouse.md @@ -4,8 +4,9 @@ linkTitle: "Machine learning in ClickHouse" description: > Machine learning in ClickHouse --- -[https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup31/ml.pdf](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup31/ml.pdf) -[CatBoost / MindsDB / Fast.ai]({{}}) +Resources -[https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/forecast.pdf](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/forecast.pdf) +* [Machine Learning in ClickHouse](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup31/ml.pdf) - Presentation from 2019 (Meetup 31) +* [ML discussion: CatBoost / MindsDB / Fast.ai](../../altinity-kb-integrations/catboost-mindsdb-fast.ai) - Brief article from 2021 +* [Machine Learning Forecast (Russian)](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/forecast.pdf) - Presentation from 2019 (Meetup 38) diff --git a/content/en/altinity-kb-queries-and-syntax/multiple-date-column-in-partition-key.md b/content/en/altinity-kb-queries-and-syntax/multiple-date-column-in-partition-key.md index 86ef6e0efd..9719b0a2e0 100644 ---
a/content/en/altinity-kb-queries-and-syntax/multiple-date-column-in-partition-key.md +++ b/content/en/altinity-kb-queries-and-syntax/multiple-date-column-in-partition-key.md @@ -18,7 +18,7 @@ CREATE TABLE part_key_multiple_dates `inserted_at` DateTime ) ENGINE = MergeTree -PARTITION BY (toYYYYMM(date), ignore(created_at), ignore(inserted_at)) +PARTITION BY (toYYYYMM(date), ignore(created_at, inserted_at)) ORDER BY (key, time); diff --git a/content/en/altinity-kb-queries-and-syntax/mutations.md b/content/en/altinity-kb-queries-and-syntax/mutations.md index 9b5093eef1..448e698933 100644 --- a/content/en/altinity-kb-queries-and-syntax/mutations.md +++ b/content/en/altinity-kb-queries-and-syntax/mutations.md @@ -4,7 +4,7 @@ linkTitle: "Mutations" description: > ALTER UPDATE / DELETE --- -Q. How to know if `ALTER TABLE … DELETE/UPDATE mutation ON CLUSTER` was finished successfully on all the nodes? +## How to know if `ALTER TABLE … DELETE/UPDATE mutation ON CLUSTER` was finished successfully on all the nodes? A. mutation status in system.mutations is local to each replica, so use @@ -14,3 +14,27 @@ SELECT hostname(), * FROM clusterAllReplicas('your_cluster_name', system.mutatio ``` Look on `is_done` and `latest_fail_reason` columns + +## Are mutations being run in parallel or they are sequential in ClickHouse® (in scope of one table) + +![Mutations](/assets/mutations4.png) + +ClickHouse runs mutations sequentially, but it can combine several mutations in a single and apply all of them in one merge. +Sometimes, it can lead to problems, when a combined expression which ClickHouse needs to execute becomes really big. (If ClickHouse combined thousands of mutations in one) + + +Because ClickHouse stores data in independent parts, ClickHouse is able to run mutation(s) merges for each part independently and in parallel. +It also can lead to high resource utilization, especially memory usage if you use `x IN (SELECT ... 
FROM big_table)` statements in mutation, because each merge will run and keep in memory its own HashSet. You can avoid this problem, if you will use [Dictionary approach](../update-via-dictionary) for such mutations. + +Parallelism of mutations controlled by settings: + +```sql +SELECT * +FROM system.merge_tree_settings +WHERE name LIKE '%mutation%' + +┌─name───────────────────────────────────────────────┬─value─┬─changed─┬─description──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─type───┐ +│ max_replicated_mutations_in_queue │ 8 │ 0 │ How many tasks of mutating parts are allowed simultaneously in ReplicatedMergeTree queue. │ UInt64 │ +│ number_of_free_entries_in_pool_to_execute_mutation │ 20 │ 0 │ When there is less than specified number of free entries in pool, do not execute part mutations. This is to leave free threads for regular merges and avoid "Too many parts" │ UInt64 │ +└────────────────────────────────────────────────────┴───────┴─────────┴──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────┘ +``` diff --git a/content/en/altinity-kb-queries-and-syntax/pivot-unpivot.md b/content/en/altinity-kb-queries-and-syntax/pivot-unpivot.md index 52d5996f6e..a2847e045c 100644 --- a/content/en/altinity-kb-queries-and-syntax/pivot-unpivot.md +++ b/content/en/altinity-kb-queries-and-syntax/pivot-unpivot.md @@ -3,6 +3,9 @@ title: "PIVOT / UNPIVOT" linkTitle: "PIVOT / UNPIVOT" description: > PIVOT / UNPIVOT +keywords: + - clickhouse pivot + - clickhouse unpivot --- ## PIVOT @@ -12,7 +15,7 @@ CREATE TABLE sales(suppkey UInt8, category String, quantity UInt32) ENGINE=Memor INSERT INTO sales VALUES (2, 'AA' ,7500),(1, 'AB' , 4000),(1, 'AA' , 6900),(1, 'AB', 8900), (1, 'AC', 8300), (1, 'AA', 7000), (1, 'AC', 9000), (2,'AA', 9800), 
(2,'AB', 9600), (1,'AC', 8900),(1, 'AD', 400), (2,'AD', 900), (2,'AD', 1200), (1,'AD', 2600), (2, 'AC', 9600),(1, 'AC', 6200); ``` -### Using Map data type (starting from Clickhouse 21.1) +### Using Map data type (starting from ClickHouse® 21.1) ```sql WITH CAST(sumMap([category], [quantity]), 'Map(String, UInt32)') AS map @@ -122,18 +125,14 @@ ORDER BY suppkey ASC │ 3 │ BRAND_C │ AC │ 6900 │ │ 3 │ BRAND_C │ AD │ 3400 │ └─────────┴─────────┴──────────┴──────────┘ -``` - -### Using tupleToNameValuePairs (starting from ClickHouse 21.9) -```sql SELECT suppkey, brand, tpl.1 AS category, tpl.2 AS quantity FROM sales_w -ARRAY JOIN tupleToNameValuePairs((AA, AB, AC, AD)) AS tpl +ARRAY JOIN tupleToNameValuePairs(CAST((AA, AB, AC, AD), 'Tuple(AA UInt32, AB UInt32, AC UInt32, AD UInt32)')) AS tpl ORDER BY suppkey ASC ┌─suppkey─┬─brand───┬─category─┬─quantity─┐ @@ -151,3 +150,4 @@ ORDER BY suppkey ASC │ 3 │ BRAND_C │ AD │ 3400 │ └─────────┴─────────┴──────────┴──────────┘ ``` + diff --git a/content/en/altinity-kb-queries-and-syntax/projections-examples.md b/content/en/altinity-kb-queries-and-syntax/projections-examples.md index 9ce1da7a05..76803bdedf 100644 --- a/content/en/altinity-kb-queries-and-syntax/projections-examples.md +++ b/content/en/altinity-kb-queries-and-syntax/projections-examples.md @@ -1,10 +1,203 @@ --- -title: "Projections examples" -linkTitle: "Projections examples" +title: "ClickHouse® Projections" +linkTitle: "ClickHouse Projections" description: > - Projections examples + Using this ClickHouse feature to optimize queries +keywords: + - clickhouse projections + - clickhouse projection vs materialized view --- -## Aggregating projections + +Projections in ClickHouse act as inner tables within a main table, functioning as a mechanism to optimize queries by using these inner tables when only specific columns are needed. 
Essentially, a projection is similar to a [Materialized View](/altinity-kb-schema-design/materialized-views/) with an [AggregatingMergeTree engine](/engines/mergetree-table-engine-family/aggregatingmergetree/), designed to be automatically populated with relevant data. + +However, too many projections can lead to excess storage, much like overusing Materialized Views. Projections share the same lifecycle as the main table, meaning they are automatically backfilled and don’t require query rewrites, which is particularly advantageous when integrating with BI tools. + +Projection parts are stored within the main table parts, and their merges occur simultaneously as the main table merges, ensuring data consistency without additional maintenance. + +compared to a separate table+MV setup: +- A separate table gives you more freedom (like partitioning, granularity, etc), but projections - more consistency (parts managed as a whole) +- Projections do not support many features (like indexes and FINAL). That becomes better with recent versions, but still a drawback + +The design approach for projections is the same as for indexes. Create a table and give it to users. If you encounter a slower query, add a projection for that particular query (or set of similar queries). You can create 10+ projections per table, materialize, drop, etc - the very same as indexes. You exchange query speed for disk space/IO and CPU needed to build and rebuild projections on merges. + +## Links + +* Amos Bird - kuaishou.com - Projections in ClickHouse. [slides](https://github.com/ClickHouse/clickhouse-presentations/blob/master/percona2021/projections.pdf). 
[video](https://youtu.be/jJ5VuLr2k5k?list=PLWhC0zeznqkkNYzcvHEfZ8hly3Cu9ojKk) +* [Documentation](https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/mergetree/#projections) +* [tinybird blog article](https://blog.tinybird.co/2021/07/09/projections/) +* ClickHouse presentation on Projections https://www.youtube.com/watch?v=QDAJTKZT8y4 +* Blog video https://clickhouse.com/videos/how-to-a-clickhouse-query-using-projections + + +## Why is a ClickHouse projection not used? + +The query analyzer should have a reason for using a projection and should not have any limitation to do so. + +- the query should use ONLY the columns defined in the projection. +- There should be a lot of data to read from the main table (gigabytes) +- for an ORDER BY projection, a WHERE statement referring to a projection column should be in the query +- FINAL queries do not work with projections. +- tables with DELETEd rows do not work with projections. This is because rows in a projection may be affected by a DELETE operation. But there is a MergeTree setting lightweight_mutation_projection_mode to change the behavior (Since 24.7) +- Projection is used only if it is cheaper to read from it than from the table (expected amount of rows and GBs read is smaller) +- Projection should be materialized. Verify that all parts have the needed projection by comparing system.parts and system.projection_parts (see query below) +- a bug in a ClickHouse version. Look at [changelog](https://clickhouse.com/docs/whats-new/changelog) and search for projection. +- If there are many projections per table, the analyzer can select any of them. If you think that a specific one is better, use the settings `preferred_optimize_projection_name` or `force_optimize_projection_name` +- If expressions are used instead of plain column names, the query should use the exact expression as defined in the projection with the same functions and modifiers.
Use column aliases to make the query the very same as in the projection definition: + +```sql +CREATE TABLE test +( + a Int64, + ts DateTime, + week alias toStartOfWeek(ts), + PROJECTION weekly_projection + ( + SELECT week, sum(a) group by week + ) +) +ENGINE = MergeTree ORDER BY a; + +insert into test +select number, now()-number*100 +from numbers(1e7); + +--explain indexes=1 +select week, sum(a) from test group by week +settings force_optimize_projection=1; +``` + +https://fiddle.clickhouse.com/7f331eb2-9408-4813-9c67-caef4cdd227d + +Explain result: ReadFromMergeTree (weekly_projection) + +``` +Expression ((Project names + Projection)) + Aggregating + Expression + ReadFromMergeTree (weekly_projection) + Indexes: + PrimaryKey + Condition: true + Parts: 9/9 + Granules: 9/1223 +``` + +## check parts + +- has the projection materialized +- does not have lightweight deletes + +``` +SELECT + p.database AS base_database, + p.table AS base_table, + p.name AS base_part_name, -- Name of the part in the base table + p.has_lightweight_delete, + pp.active +FROM system.parts AS p -- Alias for the base table's parts +LEFT JOIN system.projection_parts AS pp -- Alias for the projection's parts +ON p.database = pp.database AND p.table = pp.table + AND p.name = pp.parent_name + AND pp.name = 'projection' +WHERE + p.database = 'database' + AND p.table = 'table' + AND p.active -- Consider only active parts of the base table + -- and not pp.active -- see only missed in the list +ORDER BY p.database, p.table, p.name; + +``` + +## Recalculate on Merge + +What happens in the case of non-trivial background merges in ReplacingMergeTree, AggregatingMergeTree and similar, and OPTIMIZE table DEDUPLICATE queries? + +* Before version 24.8, projections became out of sync with the main data. +* Since version 24.8, it is controlled by a new table-level setting:
[deduplicate_merge_projection_mode](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#deduplicate_merge_projection_mode) = `throw`/`drop`/`rebuild` +* Somewhere later (before 25.3) `ignore` option was introduced. It can be helpful for cases when SummingMergeTree is used with Projections and no DELETE operation in any flavor (Replacing/Collapsing/DELETE/ALTER DELETE) is executed over the table. + +However, projection usage is still disabled for FINAL queries. So, you have to use OPTIMIZE FINAL or SELECT ...GROUP BY instead of FINAL for fighting duplicates between parts + +``` +CREATE TABLE users (uid Int16, name String, version Int16, + projection xx ( + select name,uid,version order by name + ) +) ENGINE=ReplacingMergeTree order by uid +settings deduplicate_merge_projection_mode='rebuild' + ; + +INSERT INTO users +SELECT + number AS uid, + concat('User_', toString(uid)) AS name, + 1 AS version +FROM numbers(100000); + +INSERT INTO users +SELECT + number AS uid, + concat('User_', toString(uid)) AS name, + 2 AS version +FROM numbers(100000); + +SELECT 'duplicate',name,uid,version FROM users +where name ='User_98304' +settings force_optimize_projection=1 ; + +SELECT 'dedup by group by/limit 1 by',name,uid,version FROM users +where name ='User_98304' +order by version DESC +limit 1 by uid +settings force_optimize_projection=1 +; + +optimize table users final ; + +SELECT 'dedup after optimize',name,uid,version FROM users +where name ='User_98304' +settings force_optimize_projection=1 ; + +``` +https://fiddle.clickhouse.com/e1977a66-09ce-43c4-aabc-508c957d44d7 + + +## System tables + +- system.projections +- system.projection_parts +- system.projection_parts_columns + +``` +SELECT + database, + table, + name, + formatReadableSize(sum(data_compressed_bytes) AS size) AS compressed, + formatReadableSize(sum(data_uncompressed_bytes) AS usize) AS uncompressed, + round(usize / size, 2) AS compr_rate, + sum(rows) AS rows, + count() AS part_count +FROM 
system.projection_parts +WHERE active +GROUP BY + database, + table, + name +ORDER BY size DESC; +``` + +## How to receive a list of tables with projections? + +``` +select database, table from system.tables +where create_table_query ilike '%projection%' + and database <> 'system' +``` + +## Examples + +### Aggregating ClickHouse projections ```sql create table z(Browser String, Country UInt8, F Float64) @@ -61,9 +254,9 @@ group by Browser,Country format Null; Elapsed: 0.005 sec. Processed 22.43 thousand rows ``` -## Emulation of an inverted index using orderby projection +### Emulation of an inverted index using orderby projection -You can create an `orderby projection` and include all columns of a table, but if a table is very wide it will double of stored data. This expample demonstrate a trick, we create an `orderby projection` and include primary key columns and the target column and sort by the target column. This allows using subquery to find primary key values and after that to query the table using the primary key. +You can create an `orderby projection` and include all columns of a table, but if a table is very wide it will double the amount of stored data. This example demonstrate a trick, we create an `orderby projection` and include primary key columns and the target column and sort by the target column. This allows using subquery to find [primary key values](../../engines/mergetree-table-engine-family/pick-keys/) and after that to query the table using the primary key. ```sql CREATE TABLE test_a @@ -112,8 +305,4 @@ VS **Elapsed: 0.013 sec. Processed 32.77 thousand rows** -- optimized -## See also -* Amos Bird - kuaishou.com - Projections in ClickHouse. [slides](https://github.com/ClickHouse/clickhouse-presentations/blob/master/percona2021/projections.pdf). 
[video](https://youtu.be/jJ5VuLr2k5k?list=PLWhC0zeznqkkNYzcvHEfZ8hly3Cu9ojKk) -* [Documentation](https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/mergetree/#projections) -* [tinybird blog article](https://blog.tinybird.co/2021/07/09/projections/) diff --git a/content/en/altinity-kb-queries-and-syntax/roaring-bitmaps-for-calculating-retention.md b/content/en/altinity-kb-queries-and-syntax/roaring-bitmaps-for-calculating-retention.md index 394bb5e16e..074e25ac4c 100644 --- a/content/en/altinity-kb-queries-and-syntax/roaring-bitmaps-for-calculating-retention.md +++ b/content/en/altinity-kb-queries-and-syntax/roaring-bitmaps-for-calculating-retention.md @@ -41,4 +41,4 @@ WHERE h IN (0, 1) └──────┴───────┘ ``` -See also [https://cdmana.com/2021/01/20210109005922716t.html](https://cdmana.com/2021/01/20210109005922716t.html) +See also [A primer on roaring bitmaps](https://vikramoberoi.com/a-primer-on-roaring-bitmaps-what-they-are-and-how-they-work/) diff --git a/content/en/altinity-kb-queries-and-syntax/row_policy_using_dictionary.md b/content/en/altinity-kb-queries-and-syntax/row_policy_using_dictionary.md new file mode 100644 index 0000000000..1c945c79ae --- /dev/null +++ b/content/en/altinity-kb-queries-and-syntax/row_policy_using_dictionary.md @@ -0,0 +1,346 @@ +--- +title: "Row policies overhead (hiding 'removed' tenants)" +linkTitle: "Row policies overhead" +weight: 100 +description: > + One more approach to hide (delete) rows in ClickHouse® +--- + +## No row policy + +```sql +CREATE TABLE test_delete +( + tenant Int64, + key Int64, + ts DateTime, + value_a String +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(ts) +ORDER BY (tenant, key, ts); + +INSERT INTO test_delete +SELECT + number%5, + number, + toDateTime('2020-01-01')+number/10, + concat('some_looong_string', toString(number)), +FROM numbers(1e8); + +INSERT INTO test_delete -- multiple small tenants +SELECT + number%5000, + number, + toDateTime('2020-01-01')+number/10, + 
concat('some_looong_string', toString(number)), +FROM numbers(1e8); +``` + +```sql +Q1) SELECT tenant, count() FROM test_delete GROUP BY tenant ORDER BY tenant LIMIT 6; +┌─tenant─┬──count()─┐ +│ 0 │ 20020000 │ +│ 1 │ 20020000 │ +│ 2 │ 20020000 │ +│ 3 │ 20020000 │ +│ 4 │ 20020000 │ +│ 5 │ 20000 │ +└────────┴──────────┘ +6 rows in set. Elapsed: 0.285 sec. Processed 200.00 million rows, 1.60 GB (702.60 million rows/s., 5.62 GB/s.) + +Q2) SELECT uniq(value_a) FROM test_delete where tenant = 4; +┌─uniq(value_a)─┐ +│ 20016427 │ +└───────────────┘ +1 row in set. Elapsed: 0.265 sec. Processed 20.23 million rows, 863.93 MB (76.33 million rows/s., 3.26 GB/s.) + +Q3) SELECT max(ts) FROM test_delete where tenant = 4; +┌─────────────max(ts)─┐ +│ 2020-04-25 17:46:39 │ +└─────────────────────┘ +1 row in set. Elapsed: 0.062 sec. Processed 20.23 million rows, 242.31 MB (324.83 million rows/s., 3.89 GB/s.) + +Q4) SELECT max(ts) FROM test_delete where tenant = 4 and key = 444; +┌─────────────max(ts)─┐ +│ 2020-01-01 00:00:44 │ +└─────────────────────┘ +1 row in set. Elapsed: 0.009 sec. Processed 212.99 thousand rows, 1.80 MB (24.39 million rows/s., 206.36 MB/s.) +``` + +## row policy using expression + +```sql +CREATE ROW POLICY pol1 ON test_delete USING tenant not in (1,2,3) TO all; + +Q1) SELECT tenant, count() FROM test_delete GROUP BY tenant ORDER BY tenant LIMIT 6; +┌─tenant─┬──count()─┐ +│ 0 │ 20020000 │ +│ 4 │ 20020000 │ +│ 5 │ 20000 │ +│ 6 │ 20000 │ +│ 7 │ 20000 │ +│ 8 │ 20000 │ +└────────┴──────────┘ +6 rows in set. Elapsed: 0.333 sec. Processed 140.08 million rows, 1.12 GB (420.59 million rows/s., 3.36 GB/s.) + +Q2) SELECT uniq(value_a) FROM test_delete where tenant = 4; +┌─uniq(value_a)─┐ +│ 20016427 │ +└───────────────┘ +1 row in set. Elapsed: 0.287 sec. Processed 20.23 million rows, 863.93 MB (70.48 million rows/s., 3.01 GB/s.) + +Q3) SELECT max(ts) FROM test_delete where tenant = 4; +┌─────────────max(ts)─┐ +│ 2020-04-25 17:46:39 │ +└─────────────────────┘ +1 row in set. 
Elapsed: 0.080 sec. Processed 20.23 million rows, 242.31 MB (254.20 million rows/s., 3.05 GB/s.) + +Q4) SELECT max(ts) FROM test_delete where tenant = 4 and key = 444; +┌─────────────max(ts)─┐ +│ 2020-01-01 00:00:44 │ +└─────────────────────┘ +1 row in set. Elapsed: 0.011 sec. Processed 212.99 thousand rows, 3.44 MB (19.53 million rows/s., 315.46 MB/s.) + +Q5) SELECT uniq(value_a) FROM test_delete where tenant = 1; +┌─uniq(value_a)─┐ +│ 0 │ +└───────────────┘ +1 row in set. Elapsed: 0.008 sec. Processed 180.22 thousand rows, 1.44 MB (23.69 million rows/s., 189.54 MB/s.) + +DROP ROW POLICY pol1 ON test_delete; +``` + +## row policy using table subquery + +```sql +create table deleted_tenants(tenant Int64) ENGINE=MergeTree order by tenant; + +CREATE ROW POLICY pol1 ON test_delete USING tenant not in deleted_tenants TO all; + +SELECT tenant, count() FROM test_delete GROUP BY tenant ORDER BY tenant LIMIT 6; +┌─tenant─┬──count()─┐ +│ 0 │ 20020000 │ +│ 1 │ 20020000 │ +│ 2 │ 20020000 │ +│ 3 │ 20020000 │ +│ 4 │ 20020000 │ +│ 5 │ 20000 │ +└────────┴──────────┘ +6 rows in set. Elapsed: 0.455 sec. Processed 200.00 million rows, 1.60 GB (439.11 million rows/s., 3.51 GB/s.) + +insert into deleted_tenants values(1),(2),(3); + +Q1) SELECT tenant, count() FROM test_delete GROUP BY tenant ORDER BY tenant LIMIT 6; +┌─tenant─┬──count()─┐ +│ 0 │ 20020000 │ +│ 4 │ 20020000 │ +│ 5 │ 20000 │ +│ 6 │ 20000 │ +│ 7 │ 20000 │ +│ 8 │ 20000 │ +└────────┴──────────┘ +6 rows in set. Elapsed: 0.329 sec. Processed 140.08 million rows, 1.12 GB (426.34 million rows/s., 3.41 GB/s.) + +Q2) SELECT uniq(value_a) FROM test_delete where tenant = 4; +┌─uniq(value_a)─┐ +│ 20016427 │ +└───────────────┘ +1 row in set. Elapsed: 0.287 sec. Processed 20.23 million rows, 863.93 MB (70.56 million rows/s., 3.01 GB/s.) + +Q3) SELECT max(ts) FROM test_delete where tenant = 4; +┌─────────────max(ts)─┐ +│ 2020-04-25 17:46:39 │ +└─────────────────────┘ +1 row in set. Elapsed: 0.080 sec. 
Processed 20.23 million rows, 242.31 MB (251.39 million rows/s., 3.01 GB/s.) + +Q4) SELECT max(ts) FROM test_delete where tenant = 4 and key = 444; +┌─────────────max(ts)─┐ +│ 2020-01-01 00:00:44 │ +└─────────────────────┘ +1 row in set. Elapsed: 0.010 sec. Processed 213.00 thousand rows, 3.44 MB (20.33 million rows/s., 328.44 MB/s.) + +Q5) SELECT uniq(value_a) FROM test_delete where tenant = 1; +┌─uniq(value_a)─┐ +│ 0 │ +└───────────────┘ +1 row in set. Elapsed: 0.008 sec. Processed 180.23 thousand rows, 1.44 MB (22.11 million rows/s., 176.90 MB/s.) + +DROP ROW POLICY pol1 ON test_delete; +DROP TABLE deleted_tenants; +``` + +## row policy using external dictionary (NOT dictHas) + +```sql +create table deleted_tenants(tenant Int64, deleted UInt8 default 1) ENGINE=MergeTree order by tenant; + +insert into deleted_tenants(tenant) values(1),(2),(3); + +CREATE DICTIONARY deleted_tenants_dict (tenant UInt64, deleted UInt8) +PRIMARY KEY tenant SOURCE(CLICKHOUSE(TABLE deleted_tenants)) +LIFETIME(600) LAYOUT(FLAT()); + +CREATE ROW POLICY pol1 ON test_delete USING NOT dictHas('deleted_tenants_dict', tenant) TO all; + +Q1) SELECT tenant, count() FROM test_delete GROUP BY tenant ORDER BY tenant LIMIT 6; +┌─tenant─┬──count()─┐ +│ 0 │ 20020000 │ +│ 4 │ 20020000 │ +│ 5 │ 20000 │ +│ 6 │ 20000 │ +│ 7 │ 20000 │ +│ 8 │ 20000 │ +└────────┴──────────┘ +6 rows in set. Elapsed: 0.388 sec. Processed 200.00 million rows, 1.60 GB (515.79 million rows/s., 4.13 GB/s.) + +Q2) SELECT uniq(value_a) FROM test_delete where tenant = 4; +┌─uniq(value_a)─┐ +│ 20016427 │ +└───────────────┘ +1 row in set. Elapsed: 0.291 sec. Processed 20.23 million rows, 863.93 MB (69.47 million rows/s., 2.97 GB/s.) + +Q3) SELECT max(ts) FROM test_delete where tenant = 4; +┌─────────────max(ts)─┐ +│ 2020-04-25 17:46:39 │ +└─────────────────────┘ +1 row in set. Elapsed: 0.084 sec. Processed 20.23 million rows, 242.31 MB (240.07 million rows/s., 2.88 GB/s.) 
+ +Q4) SELECT max(ts) FROM test_delete where tenant = 4 and key = 444; +┌─────────────max(ts)─┐ +│ 2020-01-01 00:00:44 │ +└─────────────────────┘ +1 row in set. Elapsed: 0.010 sec. Processed 212.99 thousand rows, 3.44 MB (21.45 million rows/s., 346.56 MB/s.) + +Q5) SELECT uniq(value_a) FROM test_delete where tenant = 1; +┌─uniq(value_a)─┐ +│ 0 │ +└───────────────┘ +1 row in set. Elapsed: 0.046 sec. Processed 20.22 million rows, 161.74 MB (440.26 million rows/s., 3.52 GB/s.) + +DROP ROW POLICY pol1 ON test_delete; +DROP DICTIONARY deleted_tenants_dict; +DROP TABLE deleted_tenants; +``` + +## row policy using external dictionary (dictHas) + +```sql +create table deleted_tenants(tenant Int64, deleted UInt8 default 1) ENGINE=MergeTree order by tenant; + +insert into deleted_tenants(tenant) select distinct tenant from test_delete where tenant not in (1,2,3); + +CREATE DICTIONARY deleted_tenants_dict (tenant UInt64, deleted UInt8) +PRIMARY KEY tenant SOURCE(CLICKHOUSE(TABLE deleted_tenants)) +LIFETIME(600) LAYOUT(FLAT()); + +CREATE ROW POLICY pol1 ON test_delete USING dictHas('deleted_tenants_dict', tenant) TO all; + +Q1) SELECT tenant, count() FROM test_delete GROUP BY tenant ORDER BY tenant LIMIT 6; +┌─tenant─┬──count()─┐ +│ 0 │ 20020000 │ +│ 4 │ 20020000 │ +│ 5 │ 20000 │ +│ 6 │ 20000 │ +│ 7 │ 20000 │ +│ 8 │ 20000 │ +└────────┴──────────┘ +6 rows in set. Elapsed: 0.399 sec. Processed 200.00 million rows, 1.60 GB (501.18 million rows/s., 4.01 GB/s.) + +Q2) SELECT uniq(value_a) FROM test_delete where tenant = 4; +┌─uniq(value_a)─┐ +│ 20016427 │ +└───────────────┘ +1 row in set. Elapsed: 0.284 sec. Processed 20.23 million rows, 863.93 MB (71.30 million rows/s., 3.05 GB/s.) + +Q3) SELECT max(ts) FROM test_delete where tenant = 4; +┌─────────────max(ts)─┐ +│ 2020-04-25 17:46:39 │ +└─────────────────────┘ +1 row in set. Elapsed: 0.080 sec. Processed 20.23 million rows, 242.31 MB (251.88 million rows/s., 3.02 GB/s.) 
+ +Q4) SELECT max(ts) FROM test_delete where tenant = 4 and key = 444; +┌─────────────max(ts)─┐ +│ 2020-01-01 00:00:44 │ +└─────────────────────┘ +1 row in set. Elapsed: 0.010 sec. Processed 212.99 thousand rows, 3.44 MB (22.01 million rows/s., 355.50 MB/s.) + +Q5) SELECT uniq(value_a) FROM test_delete where tenant = 1; +┌─uniq(value_a)─┐ +│ 0 │ +└───────────────┘ +1 row in set. Elapsed: 0.034 sec. Processed 20.22 million rows, 161.74 MB (589.90 million rows/s., 4.72 GB/s.) + +DROP ROW POLICY pol1 ON test_delete; +DROP DICTIONARY deleted_tenants_dict; +DROP TABLE deleted_tenants; +``` + +## row policy using engine=Set +```sql +create table deleted_tenants(tenant Int64) ENGINE=Set; + +insert into deleted_tenants(tenant) values(1),(2),(3); + +CREATE ROW POLICY pol1 ON test_delete USING tenant not in deleted_tenants TO all; + +Q1) SELECT tenant, count() FROM test_delete GROUP BY tenant ORDER BY tenant LIMIT 6; +┌─tenant─┬──count()─┐ +│ 0 │ 20020000 │ +│ 4 │ 20020000 │ +│ 5 │ 20000 │ +│ 6 │ 20000 │ +│ 7 │ 20000 │ +│ 8 │ 20000 │ +└────────┴──────────┘ +6 rows in set. Elapsed: 0.322 sec. Processed 200.00 million rows, 1.60 GB (621.38 million rows/s., 4.97 GB/s.) + +Q2) SELECT uniq(value_a) FROM test_delete where tenant = 4; +┌─uniq(value_a)─┐ +│ 20016427 │ +└───────────────┘ +1 row in set. Elapsed: 0.275 sec. Processed 20.23 million rows, 863.93 MB (73.56 million rows/s., 3.14 GB/s.) + +Q3) SELECT max(ts) FROM test_delete where tenant = 4; +┌─────────────max(ts)─┐ +│ 2020-04-25 17:46:39 │ +└─────────────────────┘ +1 row in set. Elapsed: 0.084 sec. Processed 20.23 million rows, 242.31 MB (240.07 million rows/s., 2.88 GB/s.) + +Q4) SELECT max(ts) FROM test_delete where tenant = 4 and key = 444; +┌─────────────max(ts)─┐ +│ 2020-01-01 00:00:44 │ +└─────────────────────┘ +1 row in set. Elapsed: 0.010 sec. Processed 212.99 thousand rows, 3.44 MB (20.69 million rows/s., 334.18 MB/s.) 
+ +Q5) SELECT uniq(value_a) FROM test_delete where tenant = 1; +┌─uniq(value_a)─┐ +│ 0 │ +└───────────────┘ +1 row in set. Elapsed: 0.030 sec. Processed 20.22 million rows, 161.74 MB (667.06 million rows/s., 5.34 GB/s.) + +DROP ROW POLICY pol1 ON test_delete; +DROP TABLE deleted_tenants; +``` + + + +## results + +expression: `CREATE ROW POLICY pol1 ON test_delete USING tenant not in (1,2,3) TO all;` + +table subq: `CREATE ROW POLICY pol1 ON test_delete USING tenant not in deleted_tenants TO all;` + +ext. dict. NOT dictHas : `CREATE ROW POLICY pol1 ON test_delete USING NOT dictHas('deleted_tenants_dict', tenant) TO all;` + +ext. dict. dictHas : + +| Q | no policy | expression | table subq | ext. dict. NOT | ext. dict. | engine=Set | +|----|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------| +| Q1 | 0.285 / 200.00m | 0.333 / 140.08m | 0.329 / 140.08m | 0.388 / 200.00m | 0.399 / 200.00m | 0.322 / 200.00m | +| Q2 | 0.265 / 20.23m | 0.287 / 20.23m | 0.287 / 20.23m | 0.291 / 20.23m | 0.284 / 20.23m | 0.275 / 20.23m | +| Q3 | 0.062 / 20.23m | 0.080 / 20.23m | 0.080 / 20.23m | 0.084 / 20.23m | 0.080 / 20.23m | 0.084 / 20.23m | +| Q4 | 0.009 / 212.99t | 0.011 / 212.99t | 0.010 / 213.00t | 0.010 / 212.99t | 0.010 / 212.99t | 0.010 / 212.99t | +| Q5 | | 0.008 / 180.22t | 0.008 / 180.23t | 0.046 / 20.22m | 0.034 / 20.22m | 0.030 / 20.22m | + +Expression in row policy seems to be fastest way (Q1, Q5). 
diff --git a/content/en/altinity-kb-queries-and-syntax/sampling-example.md b/content/en/altinity-kb-queries-and-syntax/sampling-example.md index 4c28707bd6..b270199107 100644 --- a/content/en/altinity-kb-queries-and-syntax/sampling-example.md +++ b/content/en/altinity-kb-queries-and-syntax/sampling-example.md @@ -1,10 +1,11 @@ --- title: "Sampling Example" linkTitle: "Sampling Example" -description: > - Clickhouse table sampling example --- -The most important idea about sampling that the primary index must have **low cardinality**. The following example demonstrates how sampling can be setup correctly, and an example if it being set up incorrectly as a comparison. + +The most important idea about sampling is that the primary index must have **LowCardinality**. (For more information, see [the Altinity Knowledge Base article on LowCardinality](../../altinity-kb-schema-design/lowcardinality) or [a ClickHouse® user\'s lessons learned from LowCardinality](https://altinity.com/blog/2020-5-20-reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer)). + +The following example demonstrates how sampling can be set up correctly, and an example of it being set up incorrectly as a comparison. Sampling requires `sample by expression` . This ensures a range of sampled column types fit within a specified range, which ensures the requirement of low cardinality. In this example, I cannot use `transaction_id` because I can not ensure that the min value of `transaction_id = 0` and `max value = MAX_UINT64`. Instead, I used `cityHash64(transaction_id)`to expand the range within the minimum and maximum values.
diff --git a/content/en/altinity-kb-queries-and-syntax/simplestateif-or-ifstate-for-simple-aggregate-functions.md b/content/en/altinity-kb-queries-and-syntax/simplestateif-or-ifstate-for-simple-aggregate-functions.md index f7bbb99eb3..06f81fb7b3 100644 --- a/content/en/altinity-kb-queries-and-syntax/simplestateif-or-ifstate-for-simple-aggregate-functions.md +++ b/content/en/altinity-kb-queries-and-syntax/simplestateif-or-ifstate-for-simple-aggregate-functions.md @@ -6,7 +6,7 @@ description: > --- ### Q. What is SimpleAggregateFunction? Are there advantages to use it instead of AggregateFunction in AggregatingMergeTree? -SimpleAggregateFunction can be used for those aggregations when the function state is exactly the same as the resulting function value. Typical example is `max` function: it only requires storing the single value which is already maximum, and no extra steps needed to get the final value. In contrast `avg` need to store two numbers - sum & count, which should be divided to get the final value of aggregation (done by the `-Merge` step at the very end). +The ClickHouse® SimpleAggregateFunction can be used for those aggregations when the function state is exactly the same as the resulting function value. Typical example is `max` function: it only requires storing the single value which is already maximum, and no extra steps needed to get the final value. In contrast `avg` need to store two numbers - sum & count, which should be divided to get the final value of aggregation (done by the `-Merge` step at the very end). @@ -47,7 +47,7 @@ SimpleAggregateFunction can be used for those aggregations when the function sta - + @@ -77,9 +77,11 @@ SimpleAggregateFunction can be used for those aggregations when the function sta
reading raw value per row you can access it directlyyou need to use finalizeAgggregation functionyou need to use finalizeAggregation function
using aggregated value
-See also -[https://github.com/ClickHouse/ClickHouse/pull/4629](https://github.com/ClickHouse/ClickHouse/pull/4629) -[https://github.com/ClickHouse/ClickHouse/issues/3852](https://github.com/ClickHouse/ClickHouse/issues/3852) +See also: + +* [Altinity Knowledge Base article on AggregatingMergeTree](../../engines/mergetree-table-engine-family/aggregatingmergetree/) +* [https://github.com/ClickHouse/ClickHouse/pull/4629](https://github.com/ClickHouse/ClickHouse/pull/4629) +* [https://github.com/ClickHouse/ClickHouse/issues/3852](https://github.com/ClickHouse/ClickHouse/issues/3852) ### Q. How maxSimpleState combinator result differs from plain max? diff --git a/content/en/altinity-kb-queries-and-syntax/skip-indexes/_index.md b/content/en/altinity-kb-queries-and-syntax/skip-indexes/_index.md index 760784d3b0..962c1355d8 100644 --- a/content/en/altinity-kb-queries-and-syntax/skip-indexes/_index.md +++ b/content/en/altinity-kb-queries-and-syntax/skip-indexes/_index.md @@ -4,3 +4,4 @@ linkTitle: "Skip indexes" description: > Skip indexes --- +ClickHouse® provides a type of index that in specific circumstances can significantly improve query speed. These structures are labeled "skip" indexes because they enable ClickHouse to skip reading significant chunks of data that are guaranteed to have no matching values. 
\ No newline at end of file diff --git a/content/en/altinity-kb-queries-and-syntax/skip-indexes/skip-index-bloom_filter-for-array-column.md b/content/en/altinity-kb-queries-and-syntax/skip-indexes/skip-index-bloom_filter-for-array-column.md index 0e8a52fe89..610b44093f 100644 --- a/content/en/altinity-kb-queries-and-syntax/skip-indexes/skip-index-bloom_filter-for-array-column.md +++ b/content/en/altinity-kb-queries-and-syntax/skip-indexes/skip-index-bloom_filter-for-array-column.md @@ -1,12 +1,13 @@ --- title: "Skip index bloom_filter Example" linkTitle: "Skip index bloom_filter Example" -description: > - Example: skip index bloom_filter & array column +aliases: + /altinity-kb-queries-and-syntax/skip-indexes/example-skip-index-bloom_filter-and-array-column --- -tested with 20.8.17.25 -[https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/mergetree/\#table_engine-mergetree-data_skipping-indexes](https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/mergetree/#table_engine-mergetree-data_skipping-indexes) +tested with ClickHouse® 20.8.17.25 + +[https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/\#table_engine-mergetree-data_skipping-indexes](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#table_engine-mergetree-data_skipping-indexes) ### Let's create test data @@ -38,7 +39,7 @@ select count() from bftest where has(x, -42); Processed 110.00 million rows, 9.68 GB (217.69 million rows/s., 19.16 GB/s.) ``` -As you can see Clickhouse read **110.00 million rows** and the query elapsed **Elapsed: 0.505 sec**. +As you can see ClickHouse read **110.00 million rows** and the query elapsed **Elapsed: 0.505 sec**. ### Let's add an index @@ -155,4 +156,3 @@ Also no improvement :( Outcome: I would use TYPE bloom_filter GRANULARITY 3. - 2021 Altinity Inc. All rights reserved. 
diff --git a/content/en/altinity-kb-queries-and-syntax/slow_select_count.md b/content/en/altinity-kb-queries-and-syntax/slow_select_count.md new file mode 100644 index 0000000000..c8a51412e7 --- /dev/null +++ b/content/en/altinity-kb-queries-and-syntax/slow_select_count.md @@ -0,0 +1,40 @@ +--- +title: "Why is simple `SELECT count()` Slow in ClickHouse®?" +linkTitle: "Slow `SELECT count()`" +weight: 100 +description: >- +--- + +ClickHouse is a columnar database that provides excellent performance for analytical queries. However, in some cases, a simple count query can be slow. In this article, we'll explore the reasons why this can happen and how to optimize the query. + +### Three Strategies for Counting Rows in ClickHouse + +There are three ways to count rows in a table in ClickHouse: + +1. `optimize_trivial_count_query`: This strategy extracts the number of rows from the table metadata. It's the fastest and most efficient way to count rows, but it only works for simple count queries. + +2. `allow_experimental_projection_optimization`: This strategy uses a virtual projection called _minmax_count_projection to count rows. It's faster than scanning the table but slower than the trivial count query. + +3. Scanning the smallest column in the table and reading rows from that. This is the slowest strategy and is only used when the other two strategies can't be used. + +### Why Does ClickHouse Sometimes Choose the Slowest Counting Strategy? + +In some cases, ClickHouse may choose the slowest counting strategy even when there are faster options available. Here are some possible reasons why this can happen: + +1. Row policies are used on the table: If row policies are used, ClickHouse needs to filter rows to give the proper count. You can check if row policies are used by selecting from system.row_policies. + +2. 
Experimental light-weight delete feature was used on the table: If the experimental light-weight delete feature was used, ClickHouse may use the slowest counting strategy. You can check this by looking into parts_columns for the column named _row_exists. To do this, run the following query: + +```sql +SELECT DISTINCT database, table FROM system.parts_columns WHERE column = '_row_exists'; +``` + +You can also refer to this issue on GitHub for more information: https://github.com/ClickHouse/ClickHouse/issues/47930. + +3. `SELECT FINAL` or `final=1` setting is used. + +4. `max_parallel_replicas > 1` is used. + +5. Sampling is used. + +6. Some other features like `allow_experimental_query_deduplication` or `empty_result_for_aggregation_by_empty_set` is used. diff --git a/content/en/altinity-kb-queries-and-syntax/state-and-merge-combinators.md b/content/en/altinity-kb-queries-and-syntax/state-and-merge-combinators.md index 44fe42bb5c..9741685144 100644 --- a/content/en/altinity-kb-queries-and-syntax/state-and-merge-combinators.md +++ b/content/en/altinity-kb-queries-and-syntax/state-and-merge-combinators.md @@ -4,7 +4,11 @@ linkTitle: "-State & -Merge combinators" description: > -State & -Merge combinators --- --State combinator doesn't actually store information about -If combinator, so aggregate functions with -If and without have the same serialized data. + +The -State combinator in ClickHouse® does not store additional information about the -If combinator, which means that aggregate functions with and without -If have the same serialized data structure. This can be verified through various examples, as demonstrated below. + +**Example 1**: maxIfState and maxState +In this example, we use the maxIfState and maxState functions on a dataset of numbers, serialize the result, and merge it using the maxMerge function. 
```sql $ clickhouse-local --query "SELECT maxIfState(number,number % 2) as x, maxState(number) as y FROM numbers(10) FORMAT RowBinary" | clickhouse-local --input-format RowBinary --structure="x AggregateFunction(max,UInt64), y AggregateFunction(max,UInt64)" --query "SELECT maxMerge(x), maxMerge(y) FROM table" @@ -13,7 +17,11 @@ $ clickhouse-local --query "SELECT maxIfState(number,number % 2) as x, maxState( 9 10 ``` --State combinator have the same serialized data footprint regardless of parameters used in definition of aggregate function. That's true for quantile\* and sequenceMatch/sequenceCount functions. +In both cases, the -State combinator results in identical serialized data footprints, regardless of the conditions in the -If variant. The maxMerge function merges the state without concern for the original -If condition. + +**Example 2**: quantilesTDigestIfState +Here, we use the quantilesTDigestIfState function to demonstrate that functions like quantile-based and sequence matching functions follow the same principle regarding serialized data consistency. 
+ ```sql $ clickhouse-local --query "SELECT quantilesTDigestIfState(0.1,0.9)(number,number % 2) FROM numbers(1000000) FORMAT RowBinary" | clickhouse-local --input-format RowBinary --structure="x AggregateFunction(quantileTDigestWeighted(0.5),UInt64,UInt8)" --query "SELECT quantileTDigestWeightedMerge(0.4)(x) FROM table" @@ -22,6 +30,12 @@ $ clickhouse-local --query "SELECT quantilesTDigestIfState(0.1,0.9)(number,numbe $ clickhouse-local --query "SELECT quantilesTDigestIfState(0.1,0.9)(number,number % 2) FROM numbers(1000000) FORMAT RowBinary" | clickhouse-local --input-format RowBinary --structure="x AggregateFunction(quantilesTDigestWeighted(0.5),UInt64,UInt8)" --query "SELECT quantilesTDigestWeightedMerge(0.4,0.8)(x) FROM table" [400000,800000] +``` + +**Example 3**: Quantile Functions with -Merge +This example shows how the quantileState and quantileMerge functions work together to calculate a specific quantile. + +```sql SELECT quantileMerge(0.9)(x) FROM ( @@ -34,6 +48,9 @@ FROM └───────────────────────┘ ``` +**Example 4**: sequenceMatch and sequenceCount Functions with -Merge +Finally, we demonstrate the behavior of sequenceMatchState and sequenceMatchMerge, as well as sequenceCountState and sequenceCountMerge, in ClickHouse. + ```sql SELECT sequenceMatchMerge('(?2)(?3)')(x) AS `2_3`, @@ -48,6 +65,11 @@ FROM ┌─2_3─┬─1_4─┬─1_2_3─┐ │ 1 │ 1 │ 0 │ └─────┴─────┴───────┘ +``` + +Similarly, sequenceCountState and sequenceCountMerge functions behave consistently when merging states: + +```sql SELECT sequenceCountMerge('(?1)(?2)')(x) AS `2_3`, @@ -64,3 +86,4 @@ FROM │ 3 │ 0 │ 2 │ └─────┴─────┴───────┘ ``` +ClickHouse's -State combinator stores serialized data in a consistent manner, irrespective of conditions used with -If. The same applies to a wide range of functions, including quantile and sequence-based functions. This behavior ensures that functions like maxMerge, quantileMerge, sequenceMatchMerge, and sequenceCountMerge work seamlessly, even across varied inputs. 
diff --git a/content/en/altinity-kb-queries-and-syntax/time-zones.md b/content/en/altinity-kb-queries-and-syntax/time-zones.md index 479b94eb50..911347e7a0 100644 --- a/content/en/altinity-kb-queries-and-syntax/time-zones.md +++ b/content/en/altinity-kb-queries-and-syntax/time-zones.md @@ -6,11 +6,11 @@ description: > --- Important things to know: -1. DateTime inside clickhouse is actually UNIX timestamp always, i.e. number of seconds since 1970-01-01 00:00:00 GMT. +1. DateTime inside ClickHouse® is actually UNIX timestamp always, i.e. number of seconds since 1970-01-01 00:00:00 GMT. 2. Conversion from that UNIX timestamp to a human-readable form and reverse can happen on the client (for native clients) and on the server (for HTTP clients, and for some type of queries, like `toString(ts)`) 3. Depending on the place where that conversion happened rules of different timezones may be applied. 4. You can check server timezone using `SELECT timezone()` -5. clickhouse-client also by default tries to use server timezone (see also `--use_client_time_zone` flag) +5. [clickhouse-client](https://docs.altinity.com/altinitycloud/altinity-cloud-connections/clickhouseclient/) also by default tries to use server timezone (see also `--use_client_time_zone` flag) 6. If you want you can store the timezone name inside the data type, in that case, timestamp <-> human-readable time rules of that timezone will be applied. 
```sql @@ -34,7 +34,7 @@ toUnixTimestamp(toDateTime(now())): 1626432628 toUnixTimestamp(toDateTime(now(), 'UTC')): 1626432628 ``` -Since version 20.4 clickhouse uses embedded tzdata (see [https://github.com/ClickHouse/ClickHouse/pull/10425](https://github.com/ClickHouse/ClickHouse/pull/10425) ) +Since version 20.4 ClickHouse uses embedded tzdata (see [https://github.com/ClickHouse/ClickHouse/pull/10425](https://github.com/ClickHouse/ClickHouse/pull/10425) ) You get used tzdata version @@ -79,6 +79,16 @@ Query id: 855453d7-eccd-44cb-9631-f63bb02a273c ``` +ClickHouse uses system timezone info from tzdata package if it exists, and uses own builtin tzdata if it is missing in the system. + +``` +cd /usr/share/zoneinfo/Canada +ln -s ../America/Halifax A + +TZ=Canada/A clickhouse-local -q 'select timezone()' +Canada/A +``` + ### When the conversion using different rules happen ```sql @@ -109,4 +119,4 @@ SELECT * FROM t_with_dt_utc └─────────────────────────┘ ``` -Best practice here: use UTC timezone everywhere, OR use the same default timezone for clickhouse server as used by your data +Best practice here: use UTC timezone everywhere, OR use the same default timezone for ClickHouse server as used by your data diff --git a/content/en/altinity-kb-queries-and-syntax/top-n-and-remain.md b/content/en/altinity-kb-queries-and-syntax/top-n-and-remain.md index 923ebd3214..e2aac006dc 100644 --- a/content/en/altinity-kb-queries-and-syntax/top-n-and-remain.md +++ b/content/en/altinity-kb-queries-and-syntax/top-n-and-remain.md @@ -4,6 +4,12 @@ linkTitle: "Top N & Remain" description: > Top N & Remain --- + +When working with large datasets, you may often need to compute the sum of values for the top N groups and aggregate the remainder separately. This article demonstrates several methods to achieve that in ClickHouse. 
+ +Dataset Setup +We'll start by creating a table top_with_rest and inserting data for demonstration purposes: + ```sql CREATE TABLE top_with_rest ( @@ -18,7 +24,10 @@ INSERT INTO top_with_rest SELECT FROM numbers_mt(10000); ``` -## Using UNION ALL +This creates a table with 10,000 numbers, grouped by dividing the numbers into tens. + +## Method 1: Using UNION ALL +This approach retrieves the top 10 groups by sum and aggregates the remaining groups as a separate row. ```sql SELECT * @@ -63,7 +72,9 @@ ORDER BY res ASC └──────┴──────────┘ ``` -## Using arrays + +## Method 2: Using Arrays +In this method, we push the top 10 groups into an array and add a special row for the remainder ```sql WITH toUInt64(sumIf(sum, isNull(k)) - sumIf(sum, isNotNull(k))) AS total @@ -98,7 +109,8 @@ ORDER BY res ASC └──────┴──────────┘ ``` -## Using window functions (starting from 21.1) +## Method 3: Using Window Functions +Window functions, available from ClickHouse version 21.1, provide an efficient way to calculate the sum for the top N rows and the remainder. ```sql SET allow_experimental_window_functions = 1; @@ -139,7 +151,10 @@ ORDER BY res ASC │ null │ 49000050 │ └──────┴──────────┘ ``` +Window functions allow efficient summation of the total and top groups in one query. +## Method 4: Using Row Number and Grouping +This approach calculates the row number (rn) for each group and replaces the remaining groups with NULL. ```sql SELECT k, @@ -183,3 +198,39 @@ ORDER BY res │ null │ 49000050 │ └──────┴──────────┘ ``` +This method uses ROW_NUMBER() to segregate the top N from the rest. + +## Method 5: Using WITH TOTALS +This method includes totals for all groups, and you calculate the remainder on the application side. 
+ +``` +SELECT + k, + sum(number) AS res +FROM top_with_rest +GROUP BY k + WITH TOTALS +ORDER BY res DESC +LIMIT 10 + +┌─k───┬───res─┐ +│ 999 │ 99945 │ +│ 998 │ 99845 │ +│ 997 │ 99745 │ +│ 996 │ 99645 │ +│ 995 │ 99545 │ +│ 994 │ 99445 │ +│ 993 │ 99345 │ +│ 992 │ 99245 │ +│ 991 │ 99145 │ +│ 990 │ 99045 │ +└─────┴───────┘ + +Totals: +┌─k─┬──────res─┐ +│ │ 49995000 │ +└───┴──────────┘ +``` +You would subtract the sum of the top rows from the totals in your application. + +These methods offer different approaches for handling the Top N rows and aggregating the remainder in ClickHouse. Depending on your requirements—whether you prefer using UNION ALL, arrays, window functions, or totals—each method provides flexibility for efficient querying. diff --git a/content/en/altinity-kb-queries-and-syntax/trace_log.md b/content/en/altinity-kb-queries-and-syntax/trace_log.md index 64f445e082..5ef49ddc7a 100644 --- a/content/en/altinity-kb-queries-and-syntax/trace_log.md +++ b/content/en/altinity-kb-queries-and-syntax/trace_log.md @@ -2,13 +2,9 @@ title: "Collecting query execution flamegraphs using system.trace_log" linkTitle: "trace_log" weight: 100 -description: >- - Collecting query execution flamegraph using trace_log --- -## Collecting query execution flamegraph using system.trace_log - -ClickHouse has embedded functionality to analyze the details of query performance. +ClickHouse® has embedded functionality to analyze the details of query performance. It's `system.trace_log` table. @@ -17,7 +13,7 @@ By default it collects information only about queries when runs longer than 1 se You can adjust that per query using settings `query_profiler_real_time_period_ns` & `query_profiler_cpu_time_period_ns`. Both works very similar (with desired interval dump the stacktraces of all the threads which execute the query). -real timer - allows to 'see' the situtions when cpu was not working much, but time was spend for example on IO. 
+real timer - allows to 'see' the situations when cpu was not working much, but time was spent for example on IO. cpu timer - allows to see the 'hot' points in calculations more accurately (skip the io time). Trying to collect stacktraces with a frequency higher than few KHz is usually not possible. diff --git a/content/en/altinity-kb-queries-and-syntax/troubleshooting.md b/content/en/altinity-kb-queries-and-syntax/troubleshooting.md index d16e7cdd2b..4e7b9bacf3 100644 --- a/content/en/altinity-kb-queries-and-syntax/troubleshooting.md +++ b/content/en/altinity-kb-queries-and-syntax/troubleshooting.md @@ -2,16 +2,22 @@ title: "Troubleshooting" linkTitle: "Troubleshooting" description: > - Troubleshooting + Tips for ClickHouse® troubleshooting --- -## Log of query execution -Controlled by session level setting `send_logs_level` +### Query Execution Logging + +When troubleshooting query execution in ClickHouse®, one of the most useful tools is logging the query execution details. This can be controlled using the session-level setting `send_logs_level`. Here are the different log levels you can use: Possible values: `'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none'` -Can be used with clickhouse-client in both interactive and non-interactive mode. + +This can be used with [clickhouse-client](https://docs.altinity.com/altinitycloud/altinity-cloud-connections/clickhouseclient/) in both interactive and non-interactive mode. + +The logs provide detailed information about query execution, making it easier to identify issues or bottlenecks.
You can use the following command to run a query with logging enabled: ```bash $ clickhouse-client -mn --send_logs_level='trace' --query "SELECT sum(number) FROM numbers(1000)" + +-- output -- [LAPTOP] 2021.04.29 00:05:31.425842 [ 25316 ] {14b0646d-8a6e-4b2f-9b13-52a218cf43ba} executeQuery: (from 127.0.0.1:42590, using production parser) SELECT sum(number) FROM numbers(1000) [LAPTOP] 2021.04.29 00:05:31.426281 [ 25316 ] {14b0646d-8a6e-4b2f-9b13-52a218cf43ba} ContextAccess (default): Access granted: CREATE TEMPORARY TABLE ON *.* [LAPTOP] 2021.04.29 00:05:31.426648 [ 25316 ] {14b0646d-8a6e-4b2f-9b13-52a218cf43ba} InterpreterSelectQuery: FetchColumns -> Complete @@ -22,10 +28,18 @@ $ clickhouse-client -mn --send_logs_level='trace' --query "SELECT sum(number) FR [LAPTOP] 2021.04.29 00:05:31.427875 [ 25316 ] {14b0646d-8a6e-4b2f-9b13-52a218cf43ba} executeQuery: Read 1000 rows, 7.81 KiB in 0.0019463 sec., 513795 rows/sec., 3.92 MiB/sec. [LAPTOP] 2021.04.29 00:05:31.427898 [ 25316 ] {14b0646d-8a6e-4b2f-9b13-52a218cf43ba} MemoryTracker: Peak memory usage (for query): 0.00 B. 499500 +``` +You can also redirect the logs to a file for further analysis: +```bash $ clickhouse-client -mn --send_logs_level='trace' --query "SELECT sum(number) FROM numbers(1000)" 2> ./query.log ``` +### Analyzing Logs in System Tables +If you need to analyze the logs after executing a query, you can query the system tables to retrieve the execution details. + +Query Log: You can fetch query logs from the `system.query_log` table: + ```sql LAPTOP.localdomain :) SET send_logs_level='trace'; @@ -60,9 +74,12 @@ Query id: d3db767b-34e9-4252-9f90-348cf958f822 1 rows in set. Elapsed: 0.007 sec. Processed 1.00 thousand rows, 8.00 KB (136.43 thousand rows/s., 1.09 MB/s.) 
``` -## system tables +## Analyzing Logs in System Tables + ```sql +-- Query Log: You can fetch query logs from the system.query_log table: + SELECT sum(number) FROM numbers(1000); @@ -78,13 +95,15 @@ SELECT * FROM system.query_log WHERE (event_date = today()) AND (query_id = '34c61093-3303-47d0-860b-0d644fa7264b'); -If query_thread_log enabled (SET log_query_threads = 1) +-- Query Thread Log: If thread-level logging is enabled (log_query_threads = 1), retrieve logs using: +-- To capture detailed thread-level logs, enable log_query_threads: (SET log_query_threads = 1;) SELECT * FROM system.query_thread_log WHERE (event_date = today()) AND (query_id = '34c61093-3303-47d0-860b-0d644fa7264b'); -If opentelemetry_span_log enabled (SET opentelemetry_start_trace_probability = 1, opentelemetry_trace_processors = 1) +-- OpenTelemetry Span Log: For detailed tracing with OpenTelemetry, if enabled (opentelemetry_start_trace_probability = 1), use: +-- To enable OpenTelemetry tracing for queries, set: (SET opentelemetry_start_trace_probability = 1, opentelemetry_trace_processors = 1) SELECT * FROM system.opentelemetry_span_log @@ -97,10 +116,9 @@ WHERE (trace_id, finish_date) IN ( ); ``` +### Visualizing Query Performance with Flamegraphs - -## Flamegraph - +ClickHouse supports exporting query performance data in a format compatible with speedscope.app. This can help you visualize performance bottlenecks within queries. 
Example query to generate a flamegraph: [https://www.speedscope.app/](https://www.speedscope.app/) ```sql @@ -113,7 +131,7 @@ WITH SELECT concat('clickhouse-server@', version()) AS exporter, 'https://www.speedscope.app/file-format-schema.json' AS `$schema`, - concat('Clickhouse query id: ', query) AS name, + concat('ClickHouse query id: ', query) AS name, CAST(samples, 'Array(Tuple(type String, name String, unit String, startValue UInt64, endValue UInt64, samples Array(Array(UInt32)), weights Array(UInt32)))') AS profiles, CAST(tuple(arrayMap(x -> (demangle(addressToSymbol(x)), addressToLine(x)), uniq_frames)), 'Tuple(frames Array(Tuple(name String, line String)))') AS shared FROM @@ -142,5 +160,58 @@ FROM ) SETTINGS allow_introspection_functions = 1, output_format_json_named_tuples_as_objects = 1 FORMAT JSONEachRow -SETTINGS output_format_json_named_tuples_as_objects = 1 ``` + +And query to generate traces per thread + +```sql +WITH + '8e7e0616-cfaf-43af-a139-d938ced7655a' AS query, + min(min) AS start_value, + max(max) AS end_value, + groupUniqArrayArrayArray(trace_arr) AS uniq_frames, + arrayMap((x, a, b, c, d) -> ('sampled', concat(b, ' - thread ', c.1, ' - traces ', c.2), 'nanoseconds', d.1 - start_value, d.2 - start_value, arrayMap(s -> reverse(arrayMap(y -> toUInt32(indexOf(uniq_frames, y) - 1), s)), x), a), groupArray(trace_arr), groupArray(weights), groupArray(trace_type), groupArray((thread_id, total)), groupArray((min, max))) AS samples +SELECT + concat('clickhouse-server@', version()) AS exporter, + 'https://www.speedscope.app/file-format-schema.json' AS `$schema`, + concat('ClickHouse query id: ', query) AS name, + CAST(samples, 'Array(Tuple(type String, name String, unit String, startValue UInt64, endValue UInt64, samples Array(Array(UInt32)), weights Array(UInt32)))') AS profiles, + CAST(tuple(arrayMap(x -> (demangle(addressToSymbol(x)), addressToLine(x)), uniq_frames)), 'Tuple(frames Array(Tuple(name String, line String)))') AS shared +FROM +( + 
SELECT + min(min_ns) AS min, + trace_type, + thread_id, + max(max_ns) AS max, + groupArray(trace) AS trace_arr, + groupArray(cnt) AS weights, + sum(cnt) as total + FROM + ( + SELECT + min(timestamp_ns) AS min_ns, + max(timestamp_ns) AS max_ns, + trace, + trace_type, + thread_id, + sum(if(trace_type IN ('Memory', 'MemoryPeak', 'MemorySample'), size, 1)) AS cnt + FROM system.trace_log + WHERE query_id = query + GROUP BY + trace_type, + trace, + thread_id + ) + GROUP BY + trace_type, + thread_id + ORDER BY + trace_type ASC, + total DESC +) +SETTINGS allow_introspection_functions = 1, output_format_json_named_tuples_as_objects = 1, output_format_json_quote_64bit_integers=1 +FORMAT JSONEachRow +``` + +By enabling detailed logging and tracing, you can effectively diagnose issues and optimize query performance in ClickHouse. diff --git a/content/en/altinity-kb-queries-and-syntax/ts-interpolation.md b/content/en/altinity-kb-queries-and-syntax/ts-interpolation.md index e36d222bb0..eeea98c5dd 100644 --- a/content/en/altinity-kb-queries-and-syntax/ts-interpolation.md +++ b/content/en/altinity-kb-queries-and-syntax/ts-interpolation.md @@ -5,36 +5,54 @@ description: > Time-series alignment with interpolation --- +This article demonstrates how to perform time-series data alignment with interpolation using window functions in ClickHouse. The goal is to align two different time-series (A and B) on the same timestamp axis and fill the missing values using linear interpolation. + +Step-by-Step Implementation +We begin by creating a table with test data that simulates two time-series (A and B) with randomly distributed timestamps and values. Then, we apply interpolation to fill missing values for each time-series based on the surrounding data points. + +#### 1. Drop Existing Table (if it exists) ```sql DROP TABLE test_ts_interpolation; +``` +This ensures that any previous versions of the table are removed. ---- generate test data +#### 2. 
Generate Test Data +In this step, we generate random time-series data with timestamps and values for series A and B. The values are calculated differently for each series: +```sql CREATE TABLE test_ts_interpolation ENGINE = Log AS SELECT - ((number * 100) + 50) - (rand() % 100) AS timestamp, - transform(rand() % 2, [0, 1], ['A', 'B'], '') AS ts, - if(ts = 'A', timestamp * 10, timestamp * 100) AS value + ((number * 100) + 50) - (rand() % 100) AS timestamp, -- random timestamp generation + transform(rand() % 2, [0, 1], ['A', 'B'], '') AS ts, -- randomly assign series 'A' or 'B' + if(ts = 'A', timestamp * 10, timestamp * 100) AS value -- different value generation for each series FROM numbers(1000000); +``` +Here, the timestamp is generated randomly and assigned to either series A or B using the transform() function. The value is calculated based on the series type (A or B), with different multipliers for each. - +#### 3. Preview the Generated Data +After generating the data, you can inspect it by running a simple SELECT query: +```sql SELECT * FROM test_ts_interpolation; +``` +This will show the randomly generated timestamps, series (A or B), and their corresponding values. --- interpolation select with window functions +#### 4. 
Perform Interpolation with Window Functions +To align the time-series and interpolate missing values, we use window functions in the following query: +```sql SELECT timestamp, if( ts = 'A', - toFloat64(value), - prev_a.2 + (timestamp - prev_a.1 ) * (next_a.2 - prev_a.2) / ( next_a.1 - prev_a.1) + toFloat64(value), -- If the current series is 'A', keep the original value + prev_a.2 + (timestamp - prev_a.1 ) * (next_a.2 - prev_a.2) / ( next_a.1 - prev_a.1) -- Interpolate for 'A' ) as a_value, if( ts = 'B', - toFloat64(value), - prev_b.2 + (timestamp - prev_b.1 ) * (next_b.2 - prev_b.2) / ( next_b.1 - prev_b.1) + toFloat64(value), -- If the current series is 'B', keep the original value + prev_b.2 + (timestamp - prev_b.1 ) * (next_b.2 - prev_b.2) / ( next_b.1 - prev_b.1) -- Interpolate for 'B' ) as b_value FROM ( @@ -42,11 +60,33 @@ FROM timestamp, ts, value, + -- Find the previous and next values for series 'A' anyLastIf((timestamp,value), ts='A') OVER (ORDER BY timestamp ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) AS prev_a, anyLastIf((timestamp,value), ts='A') OVER (ORDER BY timestamp DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) AS next_a, + -- Find the previous and next values for series 'B' anyLastIf((timestamp,value), ts='B') OVER (ORDER BY timestamp ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) AS prev_b, anyLastIf((timestamp,value), ts='B') OVER (ORDER BY timestamp DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) AS next_b FROM test_ts_interpolation ) + +``` +#### Explanation: +**Timestamp Alignment:** +We align the timestamps of both series (A and B) and handle missing data points. 
+ +**Interpolation Logic:** +For each A-series timestamp, if the current series is not A, we calculate the interpolated value using the linear interpolation formula: + +```text +interpolated_value = prev_a.2 + ((timestamp - prev_a.1) / (next_a.1 - prev_a.1)) * (next_a.2 - prev_a.2) ``` +Similarly, for the B series, interpolation is calculated between the previous (prev_b) and next (next_b) known values. + +**Window Functions:** +anyLastIf() is used to fetch the previous or next values for series A and B based on the timestamps. +We use window functions to efficiently calculate these values over the ordered sequence of timestamps. + + +By using window functions and interpolation, we can align time-series data with irregular timestamps and fill in missing values based on nearby data points. This technique is useful in scenarios where data is recorded at different times or irregular intervals across multiple series. + diff --git a/content/en/altinity-kb-queries-and-syntax/ttl/modify-ttl.md b/content/en/altinity-kb-queries-and-syntax/ttl/modify-ttl.md new file mode 100644 index 0000000000..49015d8989 --- /dev/null +++ b/content/en/altinity-kb-queries-and-syntax/ttl/modify-ttl.md @@ -0,0 +1,179 @@ +--- +title: "MODIFY (ADD) TTL in ClickHouse®" +linkTitle: "MODIFY (ADD) TTL" +weight: 100 +description: >- + What happens during a MODIFY or ADD TTL query +keywords: + - clickhouse modify ttl + - clickhouse alter table ttl +--- + +*For a general overview of TTL, see the article [Putting Things Where They Belong Using New TTL Moves](https://altinity.com/blog/2020-3-23-putting-things-where-they-belong-using-new-ttl-moves).* + +## ALTER TABLE tbl MODIFY (ADD) TTL: + +It's a 2-step process: + +1. `ALTER TABLE tbl MODIFY (ADD) TTL ...` + +Update table metadata: schema .sql & metadata in ZK. +It's usually a cheap and fast command. And any new INSERT after schema change will calculate TTL according to new rule. + + +2. 
`ALTER TABLE tbl MATERIALIZE TTL` + +Recalculate TTL for already existing parts. +It can be a heavy operation, because ClickHouse® will read column data & recalculate TTL & apply TTL expression. +You can disable this step completely by using `materialize_ttl_after_modify` user session setting (by default it's 1, so materialization is enabled). + + +```sql +SET materialize_ttl_after_modify=0; +ALTER TABLE tbl MODIFY TTL +``` + +If you disable materialization of TTL, it means that all old parts will keep TTL computed according to the OLD TTL rules. +MATERIALIZE TTL: + +1. Recalculate TTL (Kinda cheap, it reads only the columns participating in TTL) +2. Apply TTL (Rewrite of table data for all columns) + +You also can disable apply TTL substep via `materialize_ttl_recalculate_only` merge_tree setting (by default it's 0, so ClickHouse will apply TTL expression) + +```sql +ALTER TABLE tbl MODIFY SETTING materialize_ttl_recalculate_only=1; +``` + +This means that the TTL rule will not be applied during `ALTER TABLE tbl MODIFY (ADD) TTL ...` query. + +After this you can apply TTL (MATERIALIZE) per partition manually: + +```sql +ALTER TABLE tbl MATERIALIZE TTL [IN PARTITION partition | IN PARTITION ID 'partition_id']; +``` + +The idea of `materialize_ttl_after_modify = 0` is to use `ALTER TABLE tbl MATERIALIZE TTL IN PARTITION xxx; ALTER TABLE tbl MATERIALIZE TTL IN PARTITION yyy;` and materialize TTL gently or drop/move partitions manually until the old data without/old TTL is processed. + +MATERIALIZE TTL is done via Mutation: +1. ClickHouse creates new parts via hardlinks and writes a new ttl.txt file +2. 
ClickHouse removes old (inactive) parts after the remove time (default is 8 minutes) + +To stop materialization of TTL: + +```sql +SELECT * FROM system.mutations WHERE is_done=0 AND table = 'tbl'; +KILL MUTATION WHERE command LIKE '%MATERIALIZE TTL%' AND table = 'tbl' +``` + +### MODIFY TTL MOVE + +today: 2022-06-02 + +Table tbl + +Daily partitioning by toYYYYMMDD(timestamp) -> 20220602 + +#### Increase of TTL + +TTL timestamp + INTERVAL 30 DAY MOVE TO DISK s3 -> TTL timestamp + INTERVAL 60 DAY MOVE TO DISK s3 + +* Idea: ClickHouse needs to move data from s3 to local disk BACK +* Actual: There is no rule that data earlier than 60 DAY **should be** on local disk + +Table parts: + +``` +20220401 ttl: 20220501 disk: s3 +20220416 ttl: 20220516 disk: s3 +20220501 ttl: 20220531 disk: s3 +20220502 ttl: 20220601 disk: local +20220516 ttl: 20220616 disk: local +20220601 ttl: 20220701 disk: local +``` + +```sql +ALTER TABLE tbl MODIFY TTL timestamp + INTERVAL 60 DAY MOVE TO DISK s3; +``` + +Table parts: + +``` +20220401 ttl: 20220601 disk: s3 +20220416 ttl: 20220616 disk: s3 +20220501 ttl: 20220630 disk: s3 (ClickHouse will not move this part to local disk, because there is no TTL rule for that) +20220502 ttl: 20220701 disk: local +20220516 ttl: 20220716 disk: local +20220601 ttl: 20220731 disk: local +``` + +#### Decrease of TTL + +TTL timestamp + INTERVAL 30 DAY MOVE TO DISK s3 -> TTL timestamp + INTERVAL 14 DAY MOVE TO DISK s3 + +Table parts: + +``` +20220401 ttl: 20220501 disk: s3 +20220416 ttl: 20220516 disk: s3 +20220501 ttl: 20220531 disk: s3 +20220502 ttl: 20220601 disk: local +20220516 ttl: 20220616 disk: local +20220601 ttl: 20220701 disk: local +``` + +```sql +ALTER TABLE tbl MODIFY TTL timestamp + INTERVAL 14 DAY MOVE TO DISK s3; +``` + +Table parts: + +``` +20220401 ttl: 20220415 disk: s3 +20220416 ttl: 20220501 disk: s3 +20220501 ttl: 20220515 disk: s3 +20220502 ttl: 20220517 disk: local (ClickHouse will move this part to disk s3 in background according to TTL rule) 
+20220516 ttl: 20220601 disk: local (ClickHouse will move this part to disk s3 in background according to TTL rule) +20220601 ttl: 20220616 disk: local +``` + +### Possible TTL Rules + +TTL: +``` +DELETE (With enabled `ttl_only_drop_parts`, it's cheap operation, ClickHouse will drop the whole part) +MOVE +GROUP BY +WHERE +RECOMPRESS +``` + +Related settings: + +Server settings: + +``` +background_move_processing_pool_thread_sleep_seconds | 10 | +background_move_processing_pool_thread_sleep_seconds_random_part | 1.0 | +background_move_processing_pool_thread_sleep_seconds_if_nothing_to_do | 0.1 | +background_move_processing_pool_task_sleep_seconds_when_no_work_min | 10 | +background_move_processing_pool_task_sleep_seconds_when_no_work_max | 600 | +background_move_processing_pool_task_sleep_seconds_when_no_work_multiplier | 1.1 | +background_move_processing_pool_task_sleep_seconds_when_no_work_random_part | 1.0 | +``` + +MergeTree settings: + +``` +merge_with_ttl_timeout │ 14400 │ 0 │ Minimal time in seconds, when merge with delete TTL can be repeated. +merge_with_recompression_ttl_timeout │ 14400 │ 0 │ Minimal time in seconds, when merge with recompression TTL can be repeated. +max_replicated_merges_with_ttl_in_queue │ 1 │ 0 │ How many tasks of merging parts with TTL are allowed simultaneously in ReplicatedMergeTree queue. +max_number_of_merges_with_ttl_in_pool │ 2 │ 0 │ When there is more than specified number of merges with TTL entries in pool, do not assign new merge with TTL. This is to leave free threads for regular merges and avoid "Too many parts" +ttl_only_drop_parts │ 0 │ 0 │ Only drop altogether the expired parts and not partially prune them. 
+``` + +Session settings: + +``` +materialize_ttl_after_modify │ 1 │ 0 │ Apply TTL for old data, after ALTER MODIFY TTL query +``` diff --git a/content/en/altinity-kb-queries-and-syntax/ttl/ttl-group-by-examples.md b/content/en/altinity-kb-queries-and-syntax/ttl/ttl-group-by-examples.md index dd50b71f64..790c5d72a3 100644 --- a/content/en/altinity-kb-queries-and-syntax/ttl/ttl-group-by-examples.md +++ b/content/en/altinity-kb-queries-and-syntax/ttl/ttl-group-by-examples.md @@ -3,6 +3,8 @@ title: "TTL GROUP BY Examples" linkTitle: "TTL GROUP BY Examples" description: > TTL GROUP BY Examples +aliases: + /altinity-kb-queries-and-syntax/ttl-group-by-examples --- ### Example with MergeTree table @@ -26,9 +28,9 @@ TTL ts + interval 30 day ts = min(toStartOfDay(ts)); ``` -During TTL merges Clickhouse re-calculates values of columns in the SET section. +During TTL merges ClickHouse® re-calculates values of columns in the SET section. -GROUP BY section should be a prefix of a table's ORDER BY. +GROUP BY section should be a prefix of a table's PRIMARY KEY (the same as ORDER BY, if no separate PRIMARY KEY defined). ```sql -- stop merges to demonstrate data before / after @@ -178,7 +180,106 @@ GROUP BY m; └────────┴─────────┴────────────┴────────────────┴────────────────┘ ``` -During merges Clickhouse re-calculates **ts** columns as **min(toStartOfDay(ts))**. It's possible **only for the last column** of `SummingMergeTree` `ORDER BY` section `ORDER BY (key1, key2, toStartOfDay(ts), ts)` otherwise it will **break** the order of rows in the table. +During merges ClickHouse re-calculates **ts** columns as **min(toStartOfDay(ts))**. It's possible **only for the last column** of `SummingMergeTree` `ORDER BY` section `ORDER BY (key1, key2, toStartOfDay(ts), ts)` otherwise it will **break** the order of rows in the table. 
+ +### Example with AggregatingMergeTree table + +```sql +CREATE TABLE test_ttl_group_by_agg +( + `key1` UInt32, + `key2` UInt32, + `ts` DateTime, + `counter` AggregateFunction(count, UInt32) +) +ENGINE = AggregatingMergeTree +PARTITION BY toYYYYMM(ts) +PRIMARY KEY (key1, key2, toStartOfDay(ts)) +ORDER BY (key1, key2, toStartOfDay(ts), ts) +TTL ts + interval 30 day + GROUP BY key1, key2, toStartOfDay(ts) + SET counter = countMergeState(counter), + ts = min(toStartOfDay(ts)); + +CREATE TABLE test_ttl_group_by_raw +( + `key1` UInt32, + `key2` UInt32, + `ts` DateTime +) ENGINE = Null; + +CREATE MATERIALIZED VIEW test_ttl_group_by_mv + TO test_ttl_group_by_agg +AS +SELECT + `key1`, + `key2`, + `ts`, + countState() as counter +FROM test_ttl_group_by_raw +GROUP BY key1, key2, ts; + +-- stop merges to demonstrate data before / after +-- a rolling up +SYSTEM STOP TTL MERGES test_ttl_group_by_agg; +SYSTEM STOP MERGES test_ttl_group_by_agg; + +INSERT INTO test_ttl_group_by_raw (key1, key2, ts) +SELECT + 1, + 1, + toStartOfMinute(now() + number*60) +FROM numbers(100); + +INSERT INTO test_ttl_group_by_raw (key1, key2, ts) +SELECT + 1, + 1, + toStartOfMinute(now() + number*60) +FROM numbers(100); + +INSERT INTO test_ttl_group_by_raw (key1, key2, ts) +SELECT + 1, + 1, + toStartOfMinute(now() + number*60 - toIntervalDay(60)) +FROM numbers(100); + +INSERT INTO test_ttl_group_by_raw (key1, key2, ts) +SELECT + 1, + 1, + toStartOfMinute(now() + number*60 - toIntervalDay(60)) +FROM numbers(100); + +SELECT + toYYYYMM(ts) AS m, + count(), + countMerge(counter) +FROM test_ttl_group_by_agg +GROUP BY m; + +┌──────m─┬─count()─┬─countMerge(counter)─┐ +│ 202307 │ 200 │ 200 │ +│ 202309 │ 200 │ 200 │ +└────────┴─────────┴─────────────────────┘ + +SYSTEM START TTL MERGES test_ttl_group_by_agg; +SYSTEM START MERGES test_ttl_group_by_agg; +OPTIMIZE TABLE test_ttl_group_by_agg FINAL; + +SELECT + toYYYYMM(ts) AS m, + count(), + countMerge(counter) +FROM test_ttl_group_by_agg +GROUP BY m; + 
+┌──────m─┬─count()─┬─countMerge(counter)─┐ +│ 202307 │ 1 │ 200 │ +│ 202309 │ 100 │ 200 │ +└────────┴─────────┴─────────────────────┘ +``` ### Multilevel TTL Group by @@ -356,3 +457,5 @@ OPTIMIZE TABLE test_ttl_group_by FINAL; └────────┴─────────┴────────────┴────────────────┴────────────────┘ ``` + +Also see the [Altinity Knowledge Base pages on the MergeTree table engine family](../../../engines/mergetree-table-engine-family). \ No newline at end of file diff --git a/content/en/altinity-kb-queries-and-syntax/ttl/ttl-recompress-example.md b/content/en/altinity-kb-queries-and-syntax/ttl/ttl-recompress-example.md index 64539b8fcd..42ed0d8ea4 100644 --- a/content/en/altinity-kb-queries-and-syntax/ttl/ttl-recompress-example.md +++ b/content/en/altinity-kb-queries-and-syntax/ttl/ttl-recompress-example.md @@ -5,6 +5,7 @@ description: > TTL Recompress example --- +*See also [the Altinity Knowledge Base article on testing different compression codecs](../../../altinity-kb-schema-design/codecs/altinity-kb-how-to-test-different-compression-codecs).* ## Example how to create a table and define recompression rules @@ -23,7 +24,7 @@ TTL event_time + toIntervalMonth(1) RECOMPRESS CODEC(ZSTD(1)), event_time + toIntervalMonth(6) RECOMPRESS CODEC(ZSTD(6); ``` -Default comression is LZ4 [https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/\#server-settings-compression](https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-compression) +Default compression is LZ4. See [the ClickHouse® documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#server-settings-compression) for more information. These TTL rules recompress data after 1 and 6 months. 
@@ -49,4 +50,4 @@ ALTER TABLE hits event_time + toIntervalMonth(6) RECOMPRESS CODEC(ZSTD(6)); ``` -All columns have implicite default compression from server config, except `event_time`, that's why need to change to compression to `Default` for this column otherwise it won't be recompressed. +All columns have implicit default compression from server config, except `event_time`, that's why need to change to compression to `Default` for this column otherwise it won't be recompressed. diff --git a/content/en/altinity-kb-queries-and-syntax/update-via-dictionary.md b/content/en/altinity-kb-queries-and-syntax/update-via-dictionary.md index 2d81cb6847..6b4f93f79b 100644 --- a/content/en/altinity-kb-queries-and-syntax/update-via-dictionary.md +++ b/content/en/altinity-kb-queries-and-syntax/update-via-dictionary.md @@ -101,11 +101,11 @@ FROM test_update ``` {{% alert title="Info" color="info" %}} -In case of Replicated installation, Dictionary should be created on all nodes and source tables should have ReplicatedMergeTree engine and be replicated across all nodes. +In case of Replicated installation, Dictionary should be created on all nodes and source tables should use the [ReplicatedMergeTree](../../altinity-kb-setup-and-maintenance/altinity-kb-converting-mergetree-to-replicated/) engine and be replicated across all nodes. {{% /alert %}} {{% alert title="Info" color="info" %}} -[Starting](https://github.com/ClickHouse/ClickHouse/pull/10186) from 20.4, ClickHouse forbid by default any potential non-deterministic mutations. +[Starting](https://github.com/ClickHouse/ClickHouse/pull/10186) from 20.4, ClickHouse® forbid by default any potential non-deterministic mutations. This behavior controlled by setting `allow_nondeterministic_mutations`. You can append it to query like this `ALTER TABLE xxx UPDATE ... WHERE ... SETTINGS allow_nondeterministic_mutations = 1;` For `ON CLUSTER` queries, you would need to put this setting in default profile and restart ClickHouse servers. 
{{% /alert %}} diff --git a/content/en/altinity-kb-queries-and-syntax/variable-partitioning.md b/content/en/altinity-kb-queries-and-syntax/variable-partitioning.md new file mode 100644 index 0000000000..8fbd8c5c3b --- /dev/null +++ b/content/en/altinity-kb-queries-and-syntax/variable-partitioning.md @@ -0,0 +1,91 @@ +--- +title: "Adjustable table partitioning" +linkTitle: "Adjustable table partitioning" +weight: 100 +description: >- + An approach that allows you to redefine partitioning without table creation +--- + +In that example, partitioning is being calculated via `MATERIALIZED` column expression `toDate(toStartOfInterval(ts, toIntervalT(...)))`, but partition id also can be generated on application side and inserted to ClickHouse® as is. + +```sql +CREATE TABLE tbl +( + `ts` DateTime, + `key` UInt32, + `partition_key` Date MATERIALIZED toDate(toStartOfInterval(ts, toIntervalYear(1))) +) +ENGINE = MergeTree +PARTITION BY (partition_key, ignore(ts)) +ORDER BY key; + +SET send_logs_level = 'trace'; + +INSERT INTO tbl SELECT toDateTime(toDate('2020-01-01') + number) as ts, number as key FROM numbers(300); + +Renaming temporary part tmp_insert_20200101-0_1_1_0 to 20200101-0_1_1_0 + +INSERT INTO tbl SELECT toDateTime(toDate('2021-01-01') + number) as ts, number as key FROM numbers(300); + +Renaming temporary part tmp_insert_20210101-0_2_2_0 to 20210101-0_2_2_0 + +ALTER TABLE tbl + MODIFY COLUMN `partition_key` Date MATERIALIZED toDate(toStartOfInterval(ts, toIntervalMonth(1))); + +INSERT INTO tbl SELECT toDateTime(toDate('2022-01-01') + number) as ts, number as key FROM numbers(300); + +Renaming temporary part tmp_insert_20220101-0_3_3_0 to 20220101-0_3_3_0 +Renaming temporary part tmp_insert_20220201-0_4_4_0 to 20220201-0_4_4_0 +Renaming temporary part tmp_insert_20220301-0_5_5_0 to 20220301-0_5_5_0 +Renaming temporary part tmp_insert_20220401-0_6_6_0 to 20220401-0_6_6_0 +Renaming temporary part tmp_insert_20220501-0_7_7_0 to 20220501-0_7_7_0 +Renaming temporary 
part tmp_insert_20220601-0_8_8_0 to 20220601-0_8_8_0 +Renaming temporary part tmp_insert_20220701-0_9_9_0 to 20220701-0_9_9_0 +Renaming temporary part tmp_insert_20220801-0_10_10_0 to 20220801-0_10_10_0 +Renaming temporary part tmp_insert_20220901-0_11_11_0 to 20220901-0_11_11_0 +Renaming temporary part tmp_insert_20221001-0_12_12_0 to 20221001-0_12_12_0 + + +ALTER TABLE tbl + MODIFY COLUMN `partition_key` Date MATERIALIZED toDate(toStartOfInterval(ts, toIntervalDay(1))); + +INSERT INTO tbl SELECT toDateTime(toDate('2023-01-01') + number) as ts, number as key FROM numbers(5); + +Renaming temporary part tmp_insert_20230101-0_13_13_0 to 20230101-0_13_13_0 +Renaming temporary part tmp_insert_20230102-0_14_14_0 to 20230102-0_14_14_0 +Renaming temporary part tmp_insert_20230103-0_15_15_0 to 20230103-0_15_15_0 +Renaming temporary part tmp_insert_20230104-0_16_16_0 to 20230104-0_16_16_0 +Renaming temporary part tmp_insert_20230105-0_17_17_0 to 20230105-0_17_17_0 + + +SELECT _partition_id, min(ts), max(ts), count() FROM tbl GROUP BY _partition_id ORDER BY _partition_id; + +┌─_partition_id─┬─────────────min(ts)─┬─────────────max(ts)─┬─count()─┐ +│ 20200101-0 │ 2020-01-01 00:00:00 │ 2020-10-26 00:00:00 │ 300 │ +│ 20210101-0 │ 2021-01-01 00:00:00 │ 2021-10-27 00:00:00 │ 300 │ +│ 20220101-0 │ 2022-01-01 00:00:00 │ 2022-01-31 00:00:00 │ 31 │ +│ 20220201-0 │ 2022-02-01 00:00:00 │ 2022-02-28 00:00:00 │ 28 │ +│ 20220301-0 │ 2022-03-01 00:00:00 │ 2022-03-31 00:00:00 │ 31 │ +│ 20220401-0 │ 2022-04-01 00:00:00 │ 2022-04-30 00:00:00 │ 30 │ +│ 20220501-0 │ 2022-05-01 00:00:00 │ 2022-05-31 00:00:00 │ 31 │ +│ 20220601-0 │ 2022-06-01 00:00:00 │ 2022-06-30 00:00:00 │ 30 │ +│ 20220701-0 │ 2022-07-01 00:00:00 │ 2022-07-31 00:00:00 │ 31 │ +│ 20220801-0 │ 2022-08-01 00:00:00 │ 2022-08-31 00:00:00 │ 31 │ +│ 20220901-0 │ 2022-09-01 00:00:00 │ 2022-09-30 00:00:00 │ 30 │ +│ 20221001-0 │ 2022-10-01 00:00:00 │ 2022-10-27 00:00:00 │ 27 │ +│ 20230101-0 │ 2023-01-01 00:00:00 │ 2023-01-01 00:00:00 │ 1 │ 
+│ 20230102-0 │ 2023-01-02 00:00:00 │ 2023-01-02 00:00:00 │ 1 │ +│ 20230103-0 │ 2023-01-03 00:00:00 │ 2023-01-03 00:00:00 │ 1 │ +│ 20230104-0 │ 2023-01-04 00:00:00 │ 2023-01-04 00:00:00 │ 1 │ +│ 20230105-0 │ 2023-01-05 00:00:00 │ 2023-01-05 00:00:00 │ 1 │ +└───────────────┴─────────────────────┴─────────────────────┴─────────┘ + + +SELECT count() FROM tbl WHERE ts > '2023-01-04'; + +Key condition: unknown +MinMax index condition: (column 0 in [1672758001, +Inf)) +Selected 1/17 parts by partition key, 1 parts by primary key, 1/1 marks by primary key, 1 marks to read from 1 ranges +Spreading mark ranges among streams (default reading) +Reading 1 ranges in order from part 20230105-0_17_17_0, approx. 1 rows starting from 0 +``` diff --git a/content/en/altinity-kb-queries-and-syntax/window-functions.md b/content/en/altinity-kb-queries-and-syntax/window-functions.md index 6e55afca69..c4e26b62a1 100644 --- a/content/en/altinity-kb-queries-and-syntax/window-functions.md +++ b/content/en/altinity-kb-queries-and-syntax/window-functions.md @@ -4,19 +4,14 @@ linkTitle: "Window functions" description: > Window functions --- -| Link | [blog.tinybird.co/2021/03/16/c…](https://blog.tinybird.co/2021/03/16/coming-soon-on-clickhouse-window-functions/) | -| :--- | :--- | -| Date | Mar 26, 2021 | -![Windows Function Slides](https://api.microlink.io/?adblock=false&meta=false&screenshot&element=%23screenshot&embed=screenshot.url&url=https%3A%2F%2Fcards.microlink.io%2F%3Fpreset%3Dtinybird%26subtitle%3Dtips%26text%3DWindow%2Bfunctions%252C%2Bnested%2Bdata%252C%2BA%2BPostgreSQL%2Bengine%2Band%2Bmore) +#### Resources: -[blog.tinybird.co/2021/03/16/c…](https://blog.tinybird.co/2021/03/16/coming-soon-on-clickhouse-window-functions/) +* [Tutorial: ClickHouse® Window Functions](https://altinity.com/blog/clickhouse-window-functions-current-state-of-the-art) +* [Video: Fun with ClickHouse Window Functions](https://www.youtube.com/watch?v=sm_vUdMQz4s) +* [Blog: Battle of the Views: ClickHouse 
Window View vs. Live View](https://altinity.com/blog/battle-of-the-views-clickhouse-window-view-vs-live-view) -> An exploration on what's possible to do with the most recent experimental feature on ClickHouse - window functions, and an overview of other interesting feat... - -[Windows Functions Blog Link](https://blog.tinybird.co/2021/03/16/coming-soon-on-clickhouse-window-functions/) - -#### How Do I Simulate Window Functions Using Arrays on older versions of clickhouse? +#### How Do I Simulate Window Functions Using Arrays on older versions of ClickHouse? 1. Group with groupArray. 2. Calculate the needed metrics. diff --git a/content/en/altinity-kb-schema-design/_index.md b/content/en/altinity-kb-schema-design/_index.md index 7ffb0165c0..d59eb63d6e 100644 --- a/content/en/altinity-kb-schema-design/_index.md +++ b/content/en/altinity-kb-schema-design/_index.md @@ -6,6 +6,6 @@ keywords: - clickhouse lowcardinality - clickhouse materialized view description: > - All you need to know about ClickHouse schema design, including materialized view, limitations, lowcardinality, codecs. + All you need to know about ClickHouse® schema design, including materialized view, limitations, lowcardinality, codecs. weight: 7 --- diff --git a/content/en/altinity-kb-schema-design/altinity-kb-dictionaries-vs-lowcardinality.md b/content/en/altinity-kb-schema-design/altinity-kb-dictionaries-vs-lowcardinality.md index 5593b87658..a3b9e7ef26 100644 --- a/content/en/altinity-kb-schema-design/altinity-kb-dictionaries-vs-lowcardinality.md +++ b/content/en/altinity-kb-schema-design/altinity-kb-dictionaries-vs-lowcardinality.md @@ -16,4 +16,4 @@ From the other hand: if data can be changed in future, and that change should im For example if you need to change the used currency rare every day- it would be quite stupid to update all historical records to apply the newest exchange rate. And putting it to dict will allow to do calculations with latest exchange rate at select time. 
-For dictionary it's possible to mark some of the attributes as injective. An attribute is called injective if different attribute values correspond to different keys. It would allow ClickHouse to replace dictGet call in GROUP BY with cheap dict key. +For dictionary it's possible to mark some of the attributes as injective. An attribute is called injective if different attribute values correspond to different keys. It would allow ClickHouse® to replace dictGet call in GROUP BY with cheap dict key. diff --git a/content/en/altinity-kb-schema-design/altinity-kb-jsonasstring-and-mat.-view-as-json-parser.md b/content/en/altinity-kb-schema-design/altinity-kb-jsonasstring-and-mat.-view-as-json-parser.md index b10df86000..feca7d9434 100644 --- a/content/en/altinity-kb-schema-design/altinity-kb-jsonasstring-and-mat.-view-as-json-parser.md +++ b/content/en/altinity-kb-schema-design/altinity-kb-jsonasstring-and-mat.-view-as-json-parser.md @@ -35,4 +35,4 @@ SELECT * FROM datastore; └──────┴────┴─────┘ ``` -See also: [JSONExtract to parse many attributes at a time](../altinity-kb-queries-and-syntax/jsonextract-to-parse-many-attributes-at-a-time/) +See also: [JSONExtract to parse many attributes at a time](/altinity-kb-queries-and-syntax/jsonextract-to-parse-many-attributes-at-a-time/) diff --git a/content/en/altinity-kb-schema-design/altinity-kb-jsoneachrow-tuples-and-mvs.md b/content/en/altinity-kb-schema-design/altinity-kb-jsoneachrow-tuples-and-mvs.md new file mode 100644 index 0000000000..b17fa42788 --- /dev/null +++ b/content/en/altinity-kb-schema-design/altinity-kb-jsoneachrow-tuples-and-mvs.md @@ -0,0 +1,151 @@ +--- +title: "JSONEachRow, Tuples, Maps and Materialized Views" +linkTitle: "JSONEachRow, tuple, map and MVs" +weight: 100 +description: >- + How to use Tuple() and Map() with nested JSON messages in MVs +--- + +## Using JSONEachRow with Tuple() in Materialized views + +Sometimes we can have a nested json message with a fixed size structure like this: + +```json 
+{"s": "val1", "t": {"i": 42, "d": "2023-09-01 12:23:34.231"}} +``` + +Values can be NULL but the structure should be fixed. In this case we can use `Tuple()` to parse the JSON message: + +```sql +CREATE TABLE tests.nest_tuple_source +( + `s` String, + `t` Tuple(`i` UInt8, `d` DateTime64(3)) +) +ENGINE = Null +``` + +We can use the above table as a source for a materialized view, like it was a Kafka table and in case our message has unexpected keys we make the Kafka table ignore them with the setting (23.3+): + +`input_format_json_ignore_unknown_keys_in_named_tuple = 1` + +```sql +CREATE MATERIALIZED VIEW tests.mv_nest_tuple TO tests.nest_tuple_destination +AS +SELECT + s AS s, + t.1 AS i, + t.2 AS d +FROM tests.nest_tuple_source +``` + +Also, we need a destination table with an adapted structure as the source table: + +```sql +CREATE TABLE tests.nest_tuple_destination +( + `s` String, + `i` UInt8, + `d` DateTime64(3) +) +ENGINE = MergeTree +ORDER BY tuple() + +INSERT INTO tests.nest_tuple_source FORMAT JSONEachRow {"s": "val1", "t": {"i": 42, "d": "2023-09-01 12:23:34.231"}} + + +SELECT * +FROM nest_tuple_destination + +┌─s────┬──i─┬───────────────────────d─┐ +│ val1 │ 42 │ 2023-09-01 12:23:34.231 │ +└──────┴────┴─────────────────────────┘ +``` + +Some hints: + +- 💡 Beware of column names in ClickHouse® they are Case sensitive. If a JSON message has the key names in Capitals, the Kafka/Source table should have the same column names in Capitals. + +- 💡 Also this `Tuple()` approach is not for Dynamic json schemas as explained above. In the case of having a dynamic schema, use the classic approach using `JSONExtract` set of functions. 
If the schema is fixed, you can use `Tuple()` for `JSONEachRow` format but you need to use classic tuple notation (using index reference) inside the MV, because using named tuples inside the MV won't work: + +- 💡 `tuple.1 AS column1, tuple.2 AS column2` **CORRECT!** +- 💡 `tuple.column1 AS column1, tuple.column2 AS column2` **WRONG!** +- 💡 use `AS` (alias) for aggregated columns or columns affected by functions because MV do not work by positional arguments like SELECTs,they work by names** + +Example: + +- `parseDateTime32BestEffort(t_date)` **WRONG!** +- `parseDateTime32BestEffort(t_date) AS t_date` **CORRECT!** + +## Using JSONEachRow with Map() in Materialized views + +Sometimes we can have a nested json message with a dynamic size like these and all elements inside the nested json must be of the same type: + +```json +{"k": "val1", "st": {"a": 42, "b": 1.877363}} + +{"k": "val2", "st": {"a": 43, "b": 2.3343, "c": 34.4434}} + +{"k": "val3", "st": {"a": 66743}} +``` + +In this case we can use Map() to parse the JSON message: + +```sql + +CREATE TABLE tests.nest_map_source +( + `k` String, + `st` Map(String, Float64) +) +Engine = Null + +CREATE MATERIALIZED VIEW tests.mv_nest_map TO tests.nest_map_destination +AS +SELECT + k AS k, + st['a'] AS st_a, + st['b'] AS st_b, + st['c'] AS st_c +FROM tests.nest_map_source + + +CREATE TABLE tests.nest_map_destination +( + `k` String, + `st_a` Float64, + `st_b` Float64, + `st_c` Float64 +) +ENGINE = MergeTree +ORDER BY tuple() +``` + +By default, ClickHouse will ignore unknown keys in the Map() but if you want to fail the insert if there are unknown keys then use the setting: + +`input_format_skip_unknown_fields = 0` + +```sql +INSERT INTO tests.nest_map_source FORMAT JSONEachRow {"k": "val1", "st": {"a": 42, "b": 1.877363}} +INSERT INTO tests.nest_map_source FORMAT JSONEachRow {"k": "val2", "st": {"a": 43, "b": 2.3343, "c": 34.4434}} +INSERT INTO tests.nest_map_source FORMAT JSONEachRow {"k": "val3", "st": {"a": 66743}} + + 
+SELECT * +FROM tests.nest_map_destination + +┌─k────┬─st_a─┬─────st_b─┬─st_c─┐ +│ val1 │ 42 │ 1.877363 │ 0 │ +└──────┴──────┴──────────┴──────┘ +┌─k────┬──st_a─┬─st_b─┬─st_c─┐ +│ val3 │ 66743 │ 0 │ 0 │ +└──────┴───────┴──────┴──────┘ +┌─k────┬─st_a─┬───st_b─┬────st_c─┐ +│ val2 │ 43 │ 2.3343 │ 34.4434 │ +└──────┴──────┴────────┴─────────┘ +``` + +See also: + +- [JSONExtract to parse many attributes at a time](/altinity-kb-queries-and-syntax/jsonextract-to-parse-many-attributes-at-a-time/) +- [JSONAsString and Mat. View as JSON parser](/altinity-kb-schema-design/altinity-kb-jsonasstring-and-mat.-view-as-json-parser/) \ No newline at end of file diff --git a/content/en/altinity-kb-schema-design/backfill_column.md b/content/en/altinity-kb-schema-design/backfill_column.md index 74b6ded2fb..0d1ff01b96 100644 --- a/content/en/altinity-kb-schema-design/backfill_column.md +++ b/content/en/altinity-kb-schema-design/backfill_column.md @@ -10,6 +10,15 @@ description: >- Sometimes you need to add a column into a huge table and backfill it with a data from another source, without reingesting all data. + +{{% alert title="Replicated setup" color="info" %}} +In case of a replicated / sharded setup you need to have the dictionary and source table (dict_table / item_dict) on all nodes and they have to all have EXACTLY the same data. The easiest way to do this is to make dict_table replicated. + +In this case, you will need to set the setting `allow_nondeterministic_mutations=1` on the user that runs the `ALTER TABLE`. See the [ClickHouse® docs](https://clickhouse.com/docs/en/operations/settings/settings#allow_nondeterministic_mutations) for more information about this setting. +{{% /alert %}} + + + Here is an example. 
```sql @@ -160,7 +169,3 @@ WHERE key1 = 11111 │ 11111 │ 90 │ 9 │ 2021-11-30 │ 11111xxx │ └───────┴──────┴──────┴────────────┴──────────┘ ``` - -In case of a replicated / sharded setup you need to have the dictionary and source table (dict_table / item_dict) on all nodes and they have to all have EXACTLY the same data. The easiest way to do this is to make dict_table replicated. - -In this case, you will need to set the setting `allow_nondeterministic_mutations=1` on the user that runs the `ALTER TABLE`. See the [ClickHouse docs](https://clickhouse.com/docs/en/operations/settings/settings#allow_nondeterministic_mutations) for more information about this setting. diff --git a/content/en/altinity-kb-schema-design/best-schema-for-storing-many-metrics-registered-from-the-single-source.md b/content/en/altinity-kb-schema-design/best-schema-for-storing-many-metrics-registered-from-the-single-source.md index 80f9d7430a..886d824d84 100644 --- a/content/en/altinity-kb-schema-design/best-schema-for-storing-many-metrics-registered-from-the-single-source.md +++ b/content/en/altinity-kb-schema-design/best-schema-for-storing-many-metrics-registered-from-the-single-source.md @@ -72,7 +72,7 @@ Pros and cons: * easy to extend, you can have very dynamic / huge number of metrics. * the only option to store hierarchical / complicated data structures, also with arrays etc. inside. * good for sparse recording (each time point can have only 1% of all the possible metrics) - * ClickHouse has efficient API to work with JSON + * ClickHouse® has efficient API to work with JSON * nice if your data originally came in JSON (don't need to reformat) * Cons * uses storage non efficiently @@ -88,14 +88,14 @@ Same pros/cons as raw JSON, but usually bit more compact than JSON Pros and cons: * Pros - * clickhouse has efficient API to work with URLs (extractURLParameter etc) + * ClickHouse has efficient API to work with URLs (extractURLParameter etc) * can have sense if you data came in such format (i.e. 
you can store GET / POST request data directly w/o reprocessing) * Cons * slower than arrays ### 2e Several 'baskets' of arrays -i.e.: timestamp, sourceid, metric_names_basket1, metric_values_basker1, ..., metric_names_basketN, metric_values_basketN +i.e.: timestamp, sourceid, metric_names_basket1, metric_values_basket1, ..., metric_names_basketN, metric_values_basketN The same as 2b, but there are several key-value arrays ('basket'), and metric go to one particular basket depending on metric name (and optionally by metric type) Pros and cons: @@ -115,18 +115,14 @@ With that approach you can have as many metrics as you need and they can be very At any time you can decide to move one more metric to a separate column `ALTER TABLE ... ADD COLUMN metricX Float64 MATERIALIZED metrics.value[indexOf(metrics.names,'metricX')];` -### 2e Subcolumns [future] +## 3 json type -[https://github.com/ClickHouse/ClickHouse/issues/23516](https://github.com/ClickHouse/ClickHouse/issues/23516) +https://clickhouse.com/blog/a-new-powerful-json-data-type-for-clickhouse -WIP currently, ETA of first beta = autumn 2021 Related links: -[There is one article on our blog on this subject with some benchmarks.](https://www.altinity.com/blog/2019/5/23/handling-variable-time-series-efficiently-in-clickhouse") +[There is one article on our blog on this subject with some benchmarks.](https://www.altinity.com/blog/2019/5/23/handling-variable-time-series-efficiently-in-clickhouse) [Slides from Percona Live](https://www.percona.com/sites/default/files/ple19-slides/day1-pm/clickhouse-for-timeseries.pdf") -[Uber article about how they adapted combined approach](https://eng.uber.com/logging/") - -[Slides for Uber log storage approach](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup40/uber.pdf") diff --git a/content/en/altinity-kb-schema-design/change-order-by.md b/content/en/altinity-kb-schema-design/change-order-by.md index 4cad274ec8..3253c0d1b7 100644 --- 
a/content/en/altinity-kb-schema-design/change-order-by.md +++ b/content/en/altinity-kb-schema-design/change-order-by.md @@ -37,14 +37,24 @@ ORDER BY (column1, column2, column3) 4. Copy data from `example_table_old` into `example_table_temp` - a. Use this query to generate a list of INSERT statements + a. Use this query to generate a list of INSERT statements ```sql + -- old Clickhouse versions before a support of `where _partition_id` select concat('insert into example_table_temp select * from example_table_old where toYYYYMM(date)=',partition) as cmd, database, table, partition, sum(rows), sum(bytes_on_disk), count() from system.parts where database='default' and table='example_table_old' group by database, table, partition order by partition + + -- newer Clickhouse versions with a support of `where _partition_id` + select concat('insert into example_table_temp select * from ', table,' where _partition_id = \'',partition_id, '\';') as cmd, + database, table, partition, sum(rows), sum(bytes_on_disk), count() + from system.parts + where database='default' and table='example_table_old' + group by database, table, partition_id, partition + order by partition_id + ``` b. Create an intermediate table @@ -70,7 +80,7 @@ ORDER BY (column1, column2, column3) order by partition ``` -5. Attach data from the intermediate table to `example_table` +6. Attach data from the intermediate table to `example_table` a. Use this query to generate a list of ATTACH statements ```sql @@ -93,5 +103,5 @@ ORDER BY (column1, column2, column3) order by partition ``` -6. Drop `example_table_old` and `example_table_temp` +7. 
Drop `example_table_old` and `example_table_temp` diff --git a/content/en/altinity-kb-schema-design/codecs/_index.md b/content/en/altinity-kb-schema-design/codecs/_index.md index 0321fe3371..e3281b82ff 100644 --- a/content/en/altinity-kb-schema-design/codecs/_index.md +++ b/content/en/altinity-kb-schema-design/codecs/_index.md @@ -4,6 +4,25 @@ linkTitle: "Codecs" description: > Codecs --- + +| Codec Name | Recommended Data Types | Performance Notes | +|------------------|--------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| LZ4 | Any | Used by default. Extremely fast; good compression; balanced speed and efficiency | +| ZSTD(level) | Any | Good compression; pretty fast; best for high compression needs. Don't use levels higher than 3. | +| LZ4HC(level) | Any | LZ4 High Compression algorithm with configurable level; slower but better compression than LZ4, but decompression is still fast. | +| Delta | Integer Types, Time Series Data, Timestamps | Preprocessor (should be followed by some compression codec). Stores difference between neighboring values; good for monotonically increasing data. | +| DoubleDelta | Integer Types, Time Series Data | Stores difference between neighboring delta values; suitable for time series data | +| Gorilla | Floating Point Types | Calculates XOR between current and previous value; suitable for slowly changing numbers | +| T64 | Integer, Time Series Data, Timestamps | Preprocessor (should be followed by some compression codec). Crops unused high bits; puts them into a 64x64 bit matrix; optimized for 64-bit data types | +| GCD | Integer Numbers | Preprocessor (should be followed by some compression codec). 
Greatest common divisor compression; divides values by a common divisor; effective for divisible integer sequences | +| FPC | Floating Point Numbers | Designed for Float64; Algorithm detailed in [FPC paper](https://userweb.cs.txstate.edu/~burtscher/papers/dcc07a.pdf), [ClickHouse® PR #37553](https://github.com/ClickHouse/ClickHouse/pull/37553) | +| ZSTD_QAT | Any | Requires hardware support for QuickAssist Technology (QAT) hardware; provides accelerated compression tasks | +| DEFLATE_QPL | Any | Requires hardware support for Intel’s QuickAssist Technology for DEFLATE compression; enhanced performance for specific hardware | +| LowCardinality | String | It's not a codec, but a datatype modifier. Reduces representation size; effective for columns with low cardinality | +| NONE | Non-compressable data with very high entropy, like some random string, or some AggregateFunction states | No compression at all. Can be used on the columns that can not be compressed anyway. | + + + See [How to test different compression codecs](altinity-kb-how-to-test-different-compression-codecs) diff --git a/content/en/altinity-kb-schema-design/floats-vs-decimals.md b/content/en/altinity-kb-schema-design/floats-vs-decimals.md index ddb8683916..bb5ed7eb21 100644 --- a/content/en/altinity-kb-schema-design/floats-vs-decimals.md +++ b/content/en/altinity-kb-schema-design/floats-vs-decimals.md @@ -43,7 +43,7 @@ SELECT (toDecimal64(100000000000000000., 1) - toDecimal64(100000000000000000., 1 ``` {{% alert title="Warning" color="warning" %}} -Because clickhouse uses MPP order of execution of a single query can vary on each run, and you can get slightly different results from the float column every time you run the query. +Because ClickHouse® uses MPP order of execution of a single query can vary on each run, and you can get slightly different results from the float column every time you run the query. 
Usually, this deviation is small, but it can be significant when some kind of arithmetic operation is performed on very large and very small numbers at the same time. {{% /alert %}} diff --git a/content/en/altinity-kb-schema-design/how-much-is-too-much.md b/content/en/altinity-kb-schema-design/how-much-is-too-much.md index 88978592f5..193e3c4c82 100644 --- a/content/en/altinity-kb-schema-design/how-much-is-too-much.md +++ b/content/en/altinity-kb-schema-design/how-much-is-too-much.md @@ -1,14 +1,15 @@ --- -title: "How much is too much?" -linkTitle: "How much is too much?" +title: "ClickHouse® limitations" +linkTitle: "ClickHouse limitations" weight: 100 -description: >- - ClickHouse Limitations. +description: + How much is too much? +keywords: + - clickhouse limitations + - clickhouse too many parts --- -## How much is too much? - -In most of the cases clickhouse don't have any hard limits. But obsiously there there are some practical limitation / barriers for different things - often they are caused by some system / network / filesystem limitation. +In most of the cases ClickHouse® doesn't have any hard limits. But obviously there are some practical limitations / barriers for different things - often they are caused by some system / network / filesystem limitation. So after reaching some limits you can get different kind of problems, usually it never a failures / errors, but different kinds of degradations (slower queries / high cpu/memory usage, extra load on the network / zookeeper etc). @@ -16,14 +17,18 @@ While those numbers can vary a lot depending on your hardware & settings there i ### Number of tables (system-wide, across all databases) -- non-replicated MergeTree-family tables = few thousands is still acceptable, if you don't do realtime inserts in more that few dozens of them.
See [#32259](https://github.com/ClickHouse/ClickHouse/issues/32259) -- ReplicatedXXXMergeTree = few hundreds is still acceptable, if you don't do realtime inserts in more that few dozens of them. Every Replicated table comes with it's own cost (need to do housekeepeing operations, monitoing replication queues etc). See [#31919](https://github.com/ClickHouse/ClickHouse/issues/31919) +- non-replicated [MergeTree-family](https://kb.altinity.com/engines/mergetree-table-engine-family/) tables = few thousands is still acceptable, if you don't do realtime inserts in more than a few dozen of them. See [#32259](https://github.com/ClickHouse/ClickHouse/issues/32259) +- ReplicatedXXXMergeTree = few hundreds is still acceptable, if you don't do realtime inserts in more than a few dozen of them. Every Replicated table comes with its own cost (need to do housekeeping operations, monitoring replication queues etc). See [#31919](https://github.com/ClickHouse/ClickHouse/issues/31919) - Log family table = even dozens of thousands is still ok, especially if database engine = Lazy is used. ### Number of databases Fewer than number of tables (above). Dozens / hundreds is usually still acceptable. +### Number of inserts per second + +For usual (non async) inserts - dozen is enough. Every insert creates a part, if you will create parts too often, ClickHouse will not be able to merge them and you will be getting 'too many parts'. + ### Number of columns in the table Up to a few hundreds. With thousands of columns the inserts / background merges may become slower / require more of RAM.
-### Number of zookeeper nodes in the ensemble +### Number of [Zookeeper nodes](https://docs.altinity.com/operationsguide/clickhouse-zookeeper/) in the ensemble -3 (Three) for most of the cases is enough (you can loose one node). Using more nodes allows to scale up read throughput for zookeeper, but don't improve writes at all. +3 (Three) for most of the cases is enough (you can lose one node). Using more nodes allows to scale up read throughput for zookeeper, but doesn't improve writes at all. -### Number of materialized view attached to a single table. +### Number of [materialized views](/altinity-kb-schema-design/materialized-views/) attached to a single table. -Up to few. The less the better if the table is getting realtime inserts. (no matter if MV are chained or all are feeded from the same source table). +Up to a few. The less the better if the table is getting realtime inserts. (no matter if MV are chained or all are fed from the same source table). -The more you have the more costy your inserts are, and the bigger risks to get some inconsitencies between some MV (inserts to MV and main table are not atomic). +The more you have the more costly your inserts are, and the bigger risks to get some inconsistencies between some MV (inserts to MV and main table are not atomic). -If the table don't have realtime inserts you can have more MV. +If the table doesn't have realtime inserts you can have more MV. ### Number of projections inside a single table. @@ -81,10 +86,10 @@ One to about a dozen. Different types of indexes has different penalty, bloom_fi At some point your inserts will slow down. Try to create possible minimum of indexes. You can combine many columns into a single index and this index will work for any predicate but create less impact.
-### Number of Kafka tables / consumers inside +### Number of [Kafka tables / consumers](https://altinity.com/blog/kafka-engine-the-story-continues) inside High number of Kafka tables maybe quite expensive (every consumer = very expensive librdkafka object with several threads inside). -Usually alternative approaches are preferrable (mixing several datastreams in one topic, denormalizing, consuming several topics of identical structure with a single Kafka table, etc). +Usually alternative approaches are preferable (mixing several datastreams in one topic, denormalizing, consuming several topics of identical structure with a single Kafka table, etc). If you really need a lot of Kafka tables you may need more ram / CPU on the node and increase `background_message_broker_schedule_pool_size` (default is 16) to the number of Kafka tables. diff --git a/content/en/altinity-kb-schema-design/ingestion-aggregate-function.md b/content/en/altinity-kb-schema-design/ingestion-aggregate-function.md new file mode 100644 index 0000000000..a9158a7422 --- /dev/null +++ b/content/en/altinity-kb-schema-design/ingestion-aggregate-function.md @@ -0,0 +1,97 @@ +--- +title: "Ingestion of AggregateFunction" +linkTitle: "Ingestion of AggregateFunction" +weight: 100 +description: >- + ClickHouse® - How to insert AggregateFunction data +--- + +## How to insert AggregateFunction data + +### Ephemeral column + +```sql +CREATE TABLE users ( + uid Int16, + updated SimpleAggregateFunction(max, DateTime), + name_stub String Ephemeral, + name AggregateFunction(argMax, String, DateTime) + default arrayReduce('argMaxState', [name_stub], [updated]) +) ENGINE=AggregatingMergeTree order by uid; + +INSERT INTO users(uid, updated, name_stub) VALUES (1231, '2020-01-02 00:00:00', 'Jane'); + +INSERT INTO users(uid, updated, name_stub) VALUES (1231, '2020-01-01 00:00:00', 'John'); + +SELECT + uid, + max(updated) AS updated, + argMaxMerge(name) +FROM users +GROUP BY uid 
+┌──uid─┬─────────────updated─┬─argMaxMerge(name)─┐ +│ 1231 │ 2020-01-02 00:00:00 │ Jane │ +└──────┴─────────────────────┴───────────────────┘ +``` + +### Input function + +```sql +CREATE TABLE users ( + uid Int16, + updated SimpleAggregateFunction(max, DateTime), + name AggregateFunction(argMax, String, DateTime) +) ENGINE=AggregatingMergeTree order by uid; + +INSERT INTO users +SELECT uid, updated, arrayReduce('argMaxState', [name], [updated]) +FROM input('uid Int16, updated DateTime, name String') FORMAT Values (1231, '2020-01-02 00:00:00', 'Jane'); + +INSERT INTO users +SELECT uid, updated, arrayReduce('argMaxState', [name], [updated]) +FROM input('uid Int16, updated DateTime, name String') FORMAT Values (1231, '2020-01-01 00:00:00', 'John'); + +SELECT + uid, + max(updated) AS updated, + argMaxMerge(name) +FROM users +GROUP BY uid; +┌──uid─┬─────────────updated─┬─argMaxMerge(name)─┐ +│ 1231 │ 2020-01-02 00:00:00 │ Jane │ +└──────┴─────────────────────┴───────────────────┘ +``` + +### Materialized View And Null Engine + +```sql +CREATE TABLE users ( + uid Int16, + updated SimpleAggregateFunction(max, DateTime), + name AggregateFunction(argMax, String, DateTime) +) ENGINE=AggregatingMergeTree order by uid; + +CREATE TABLE users_null ( + uid Int16, + updated DateTime, + name String +) ENGINE=Null; + +CREATE MATERIALIZED VIEW users_mv TO users AS +SELECT uid, updated, arrayReduce('argMaxState', [name], [updated]) name +FROM users_null; + +INSERT INTO users_null Values (1231, '2020-01-02 00:00:00', 'Jane'); + +INSERT INTO users_null Values (1231, '2020-01-01 00:00:00', 'John'); + +SELECT + uid, + max(updated) AS updated, + argMaxMerge(name) +FROM users +GROUP BY uid; +┌──uid─┬─────────────updated─┬─argMaxMerge(name)─┐ +│ 1231 │ 2020-01-02 00:00:00 │ Jane │ +└──────┴─────────────────────┴───────────────────┘ +``` diff --git a/content/en/altinity-kb-schema-design/insert_deduplication.md b/content/en/altinity-kb-schema-design/insert_deduplication.md index 
229a20a648..841dfef788 100644 --- a/content/en/altinity-kb-schema-design/insert_deduplication.md +++ b/content/en/altinity-kb-schema-design/insert_deduplication.md @@ -1,13 +1,14 @@ --- -title: "Insert Deduplication / Insert idempotency" +title: "Insert Deduplication / Insert Idempotency" linkTitle: "insert deduplication" weight: 100 description: >- - Insert Deduplication / Insert idempotency , insert_deduplicate setting. + Using ClickHouse® features to avoid duplicate data +keywords: + - clickhouse insert deduplication + - clickhouse insert_deduplicate --- -# Insert Deduplication - Replicated tables have a special feature insert deduplication (enabled by default). [Documentation:](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/) @@ -49,7 +50,7 @@ In `clickhouse-server.log` you may see trace messages `Block with ID ... already ..17:52:45.076738.. Block with ID all_7615936253566048997_747463735222236827 already exists locally as part all_0_0_0; ignoring it. ``` -Deduplication checksums are stored in Zookeeper in `/blocks` table's znode for each partition separately, so when you drop partition, they could be identified and removed for this partition. +Deduplication checksums are stored in [Zookeeper](https://docs.altinity.com/operationsguide/clickhouse-zookeeper/) in `/blocks` table's znode for each partition separately, so when you drop partition, they could be identified and removed for this partition. (during `alter table delete` it's impossible to match checksums, that's why checksums stay in Zookeeper). 
```sql SELECT name, value @@ -62,7 +63,7 @@ WHERE path = '/clickhouse/cluster_test/tables/test_insert/blocks' ## insert_deduplicate setting -Insert deduplication is controled by the [insert_deduplicate](https://clickhouse.com/docs/en/operations/settings/settings/#settings-insert-deduplicate) setting +Insert deduplication is controlled by the [insert_deduplicate](https://clickhouse.com/docs/en/operations/settings/settings/#settings-insert-deduplicate) setting Let's disable it: ```sql @@ -244,7 +245,7 @@ select * from test_insert format PrettyCompactMonoBlock; ## insert_deduplication_token -Since Clikhouse 22.2 there is a new setting [insert_dedupplication_token](https://clickhouse.com/docs/en/operations/settings/settings/#insert_deduplication_token). +Since ClickHouse® 22.2 there is a new setting [insert_deduplication_token](https://clickhouse.com/docs/en/operations/settings/settings/#insert_deduplication_token). This setting allows you to define an explicit token that will be used for deduplication instead of calculating a checksum from the inserted data. 
```sql @@ -254,14 +255,14 @@ ENGINE = MergeTree ORDER BY A SETTINGS non_replicated_deduplication_window = 100; -INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test' (1); +INSERT INTO test_table SETTINGS insert_deduplication_token = 'test' VALUES (1); -- the next insert won't be deduplicated because insert_deduplication_token is different -INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test1' (1); +INSERT INTO test_table SETTINGS insert_deduplication_token = 'test1' VALUES (1); -- the next insert will be deduplicated because insert_deduplication_token -- is the same as one of the previous -INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test' (2); +INSERT INTO test_table SETTINGS insert_deduplication_token = 'test' VALUES (2); SELECT * FROM test_table ┌─A─┐ │ 1 │ diff --git a/content/en/altinity-kb-schema-design/materialized-views/_index.md b/content/en/altinity-kb-schema-design/materialized-views/_index.md index f4b684dc24..95e3f147e3 100644 --- a/content/en/altinity-kb-schema-design/materialized-views/_index.md +++ b/content/en/altinity-kb-schema-design/materialized-views/_index.md @@ -1,18 +1,27 @@ --- -title: "MATERIALIZED VIEWS" -linkTitle: "MATERIALIZED VIEWS" +title: "ClickHouse® Materialized Views" +linkTitle: "Materialized Views" description: > - MATERIALIZED VIEWS + Making the most of this powerful ClickHouse® feature +keywords: + - clickhouse materialized view + - create materialized view clickhouse --- -{{% alert title="Info" color="info" %}} -MATERIALIZED VIEWs in ClickHouse behave like AFTER INSERT TRIGGER to the left-most table listed in its SELECT statement. -{{% /alert %}} +ClickHouse® MATERIALIZED VIEWs behave like AFTER INSERT TRIGGER to the left-most table listed in their SELECT statement and never read data from disk. Only rows that are placed to the RAM buffer by INSERT are read. -# MATERIALIZED VIEWS +## Useful links -* Clickhouse and the magic of materialized views. 
Basics explained with examples: [webinar recording](https://altinity.com/webinarspage/2019/6/26/clickhouse-and-the-magic-of-materialized-views) -* Everything you should know about materialized views. Very detailed information about internals: [video](https://youtu.be/ckChUkC3Pns?t=9353), [annotated presentation](https://den-crane.github.io/Everything_you_should_know_about_materialized_views_commented.pdf), [presentation](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup47/materialized_views.pdf) +* ClickHouse Materialized Views Illuminated, Part 1: + * [Blog post](https://altinity.com/blog/clickhouse-materialized-views-illuminated-part-1) + * [Webinar recording](https://www.youtube.com/watch?app=desktop&v=j15dvPGzhyE) +* ClickHouse Materialized Views Illuminated, Part 2: + * [Blog post](https://altinity.com/blog/clickhouse-materialized-views-illuminated-part-2) + * [Webinar recording](https://www.youtube.com/watch?v=THDk625DGsQ) + * [Slides](https://altinity.com/wp-content/uploads/2024/05/ClickHouse-Materialized-Views-The-Magic-Continues-1.pdf) +* Everything you should know about Materialized Views: + * [Video](https://www.youtube.com/watch?v=ckChUkC3Pns&t=9353s) + * [Annotated slides](https://den-crane.github.io/Everything_you_should_know_about_materialized_views_commented.pdf) ## Best practices @@ -29,7 +38,7 @@ MATERIALIZED VIEWs in ClickHouse behave like AFTER INSERT TRIGGER to the left-mo That way it's bit simpler to do schema migrations or build more complicated pipelines when one table is filled by several MV. - With engine=Atomic it hard to map undelying table with the MV. + With engine=Atomic it's hard to map the underlying table with the MV. 2. Avoid using POPULATE when creating MATERIALIZED VIEW on big tables.
@@ -55,15 +64,15 @@ MATERIALIZED VIEWs in ClickHouse behave like AFTER INSERT TRIGGER to the left-mo Since MATERIALIZED VIEWs are updated on every INSERT to the underlying table and you can not insert anything to the usual VIEW, the materialized view update will never be triggered. -Normally you should build MATERIALIZED VIEWs on the top of the table with MergeTree engine family. +Normally, you should build MATERIALIZED VIEWs on the top of the table with the MergeTree engine family. -### Q. I've created materialized error with some error, and since it's it reading from Kafka I don't understand where the error is +### Q. I've created a materialized view with some error, and since it's reading from Kafka, I don't understand where the error is -Server logs will help you. Also, see the next question. +Look into system.query_views_log table or server logs, or system.text_log table. Also, see the next question. ### Q. How to debug misbehaving MATERIALIZED VIEW? -You can also attach the same MV to some dummy table with engine=Log (or even Null) and do some manual inserts there to debug the behavior. Similar way (as the Materialized view often can contain some pieces of the business logic of the application) you can create tests for your schema. +You can also attach the same MV to a dummy table with engine=Null and do manual inserts to debug the behavior. In a similar way (as the Materialized view often contains some pieces of the application's business logic), you can create tests for your schema. {{% alert title="Warning" color="warning" %}} Always test MATERIALIZED VIEWs first on staging or testing environments @@ -85,6 +94,12 @@ So it will most probably work not as you expect and will hit insert performance The MV will be attached (as AFTER INSERT TRIGGER) to the left-most table in the MV SELECT statement, and it will 'see' only freshly inserted rows there. 
It will 'see' the whole set of rows of other tables, and the query will be executed EVERY TIME you do the insert to the left-most table. That will impact the performance speed there significantly. If you really need to update the MV with the left-most table, not impacting the performance so much you can consider using dictionary / engine=Join / engine=Set for right-hand table / subqueries (that way it will be always in memory, ready to use). +### Q. How are MVs executed sequentially or in parallel? + +By default, the execution is sequential and alphabetical. It can be switched by [parallel_view_processing](https://clickhouse.com/docs/en/operations/settings/settings#parallel_view_processing). + +Parallel processing could be helpful if you have a lot of spare CPU power (cores) and want to utilize it. Add the setting to the insert statement or to the user profile. New blocks created by MVs will also follow the squashing logic similar to the one used in the insert, but they will use the min_insert_block_size_rows_for_materialized_views and min_insert_block_size_bytes_for_materialized_views settings. + ### Q. 
How to alter MV implicit storage (w/o TO syntax) 1) take the existing MV definition diff --git a/content/en/altinity-kb-schema-design/materialized-views/backfill-populate-mv-in-a-controlled-manner.md b/content/en/altinity-kb-schema-design/materialized-views/backfill-populate-mv-in-a-controlled-manner.md index 43213bd5f3..92dfa67e9b 100644 --- a/content/en/altinity-kb-schema-design/materialized-views/backfill-populate-mv-in-a-controlled-manner.md +++ b/content/en/altinity-kb-schema-design/materialized-views/backfill-populate-mv-in-a-controlled-manner.md @@ -24,4 +24,52 @@ INSERT INTO mv_import SELECT * FROM huge_table WHERE toYYYYMM(ts) = 202105; ALTER TABLE mv ATTACH PARTITION ID '202105' FROM mv_import; ``` -See also [https://clickhouse.tech/docs/en/sql-reference/statements/alter/partition/\#alter_attach-partition-from](https://clickhouse.tech/docs/en/sql-reference/statements/alter/partition/\#alter_attach-partition-from). +See also [the ClickHouse® documentation on Manipulating Partitions and Parts](https://clickhouse.com/docs/en/sql-reference/statements/alter/partition). + +Q. I still do not have enough RAM to GROUP BY the whole partition. + +A. Push aggregating to the background during MERGES + +There is a modified version of MergeTree Engine, called [AggregatingMergeTree](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/aggregatingmergetree). That engine has additional logic that is applied to rows with the same set of values in columns that are specified in the table's ORDER BY expression. All such rows are aggregated to only one row using the aggregating functions defined in the column definitions. There are two "special" column types, designed specifically for that purpose: + +- [AggregateFunction](https://clickhouse.com/docs/en/sql-reference/data-types/aggregatefunction) +- [SimpleAggregateFunction](https://clickhouse.com/docs/en/sql-reference/data-types/simpleaggregatefunction) + +INSERT ... 
SELECT operating over the very large partition will create data parts by 1M rows (min_insert_block_size_rows), those parts will be aggregated during the merge process the same way as GROUP BY does it, but the number of rows will be much less than the total rows in the partition and RAM usage too. Merge combined with GROUP BY will create a new part with a much smaller number of rows. That data part possibly will be merged again with other data, but the number of rows will not be too big. + +```sql +CREATE TABLE mv_import ( + id UInt64, + ts SimpleAggregateFunction(max,DateTime), -- most fresh + v1 SimpleAggregateFunction(sum,UInt64), -- just sum + v2 SimpleAggregateFunction(max,String), -- some not empty string + v3 AggregateFunction(argMax,String,DateTime) -- last value +) ENGINE = AggregatingMergeTree() +ORDER BY id; + +INSERT INTO mv_import +SELECT id, -- ORDER BY column + ts,v1,v2, -- state for SimpleAggregateFunction the same as value + initializeAggregation('argMaxState',v3,ts) -- we need to convert from values to States for columns with AggregateFunction type +FROM huge_table +WHERE toYYYYMM(ts) = 202105; +``` + +Actually, the first GROUP BY run will happen just before 1M rows will be stored on disk as a data part. You may disable that behavior by switching off [optimize_on_insert](https://clickhouse.com/docs/en/operations/settings/settings#optimize-on-insert) setting if you have heavy calculations during aggregation. + +You may attach such a table (with AggregateFunction columns) to the main table as in the example above, but if you don't like having States in the Materialized Table, data should be finalized and converted back to normal values. In that case, you have to move data by INSERT ... 
SELECT again: + +```sql +INSERT INTO MV +SELECT id,ts,v1,v2, -- nothing special for SimpleAggregateFunction columns + finalizeAggregation(v3) +from mv_import FINAL +``` + +The last run of GROUP BY will happen during FINAL execution and AggregateFunction types are converted back to normal values. To simplify retries after failures an additional temporary table and the same trick with ATTACH could be applied. + + + + + + diff --git a/content/en/altinity-kb-schema-design/materialized-views/idempotent_inserts_mv.md b/content/en/altinity-kb-schema-design/materialized-views/idempotent_inserts_mv.md index 19ebd72039..8a8e901672 100644 --- a/content/en/altinity-kb-schema-design/materialized-views/idempotent_inserts_mv.md +++ b/content/en/altinity-kb-schema-design/materialized-views/idempotent_inserts_mv.md @@ -8,17 +8,17 @@ description: >- ## Why inserts into materialized views are not idempotent? -ClickHouse still does not have transactions. They will be implemented around 2022Q2. +ClickHouse® still does not have transactions. They were to be implemented around 2022Q2 but still not in the roadmap. -Because of Clickhouse materialized view is a trigger. And an insert into a table and an insert into a subordinate materialized view it's two different inserts so they are not atomic alltogether. +Because a ClickHouse materialized view is a trigger. And an insert into a table and an insert into a subordinate materialized view it's two different inserts so they are not atomic altogether. -And insert into a materialized view may fail after the succesful insert into the table. In case of any failure a client gets the error about failed insertion. 
-You may enable insert_deduplication (it's enabled by default for Replciated engines) and repeate the insert with an idea to achive idempotate insertion, -and insertion will be skipped into the source table becase of deduplication but it will be skipped for materialized view as well because -by default materialized view inherites deduplication from the source table. +And insert into a materialized view may fail after the successful insert into the table. In case of any failure a client gets the error about failed insertion. +You may enable insert_deduplication (it's enabled by default for Replicated engines) and repeat the insert with an idea to achieve idempotent insertion, +and insertion will be skipped into the source table because of deduplication but it will be skipped for materialized view as well because +by default materialized view inherits deduplication from the source table. It's controlled by a parameter `deduplicate_blocks_in_dependent_materialized_views` https://clickhouse.com/docs/en/operations/settings/settings/#settings-deduplicate-blocks-in-dependent-materialized-views -If your materialized view is wide enought and always have enought data for constistent deduplication then you can enable `deduplicate_blocks_in_dependent_materialized_views`. +If your materialized view is wide enough and always has enough data for consistent deduplication then you can enable `deduplicate_blocks_in_dependent_materialized_views`. Or you may add information for deduplication (some unique information / insert identifier). ### Example 1. Inconsistency with deduplicate_blocks_in_dependent_materialized_views 0 @@ -59,6 +59,10 @@ select sum(CNT) from test_mv; 0 rows in set. Elapsed: 0.001 sec. -- Inconsistency! Unfortunatly insert into MV was deduplicated as well ``` +Here is another example - https://github.com/ClickHouse/ClickHouse/issues/56642 + + + ### Example 2. 
Inconsistency with deduplicate_blocks_in_dependent_materialized_views 1 ```sql @@ -148,7 +152,7 @@ select sum(CNT) from test_mv; └──────────┘ ``` -Idea how to fix it in Clickhouse source code https://github.com/ClickHouse/ClickHouse/issues/30240 +Idea how to fix it in ClickHouse source code https://github.com/ClickHouse/ClickHouse/issues/30240 ### Fake (unused) metric to add uniqueness. diff --git a/content/en/altinity-kb-schema-design/preaggregations.md b/content/en/altinity-kb-schema-design/preaggregations.md index 3c1e96ac56..d650ff6eaa 100644 --- a/content/en/altinity-kb-schema-design/preaggregations.md +++ b/content/en/altinity-kb-schema-design/preaggregations.md @@ -3,22 +3,22 @@ title: "Pre-Aggregation approaches" linkTitle: "Pre-Aggregation approaches" weight: 100 description: >- - ETL vs Materialized Views vs Projections in ClickHouse. + ETL vs Materialized Views vs Projections in ClickHouse® --- ## Pre-Aggregation approaches: ETL vs Materialized Views vs Projections -| | ETL | MV | Projections | -|:-|:-|:-|:-| -| Realtime | no | yes | yes | -| How complex queries can be used to build the preaggregaton | any | complex | very simple | -| Impacts the insert speed | no | yes | yes | -| Are inconsistancies possible | Depends on ETL. If it process the errors properly - no. | yes (no transactions / atomicity) | no | -| Lifetime of aggregation | any | any | Same as the raw data | -| Requirements | need external tools/scripting | is a part of database schema | is a part of table schema | +| | ETL | MV | Projections | +|:-|:-----------------------------------------------------------------|:-|:-| +| Realtime | no | yes | yes | +| How complex queries can be used to build the preaggregaton | any | complex | very simple | +| Impacts the insert speed | no | yes | yes | +| Are inconsistancies possible | Depends on ETL. If it process the errors properly - no. 
| yes (no transactions / atomicity) | no | +| Lifetime of aggregation | any | any | Same as the raw data | +| Requirements | need external tools/scripting | is a part of database schema | is a part of table schema | | How complex to use in queries | Depends on aggregation, usually simple, quering a separate table | Depends on aggregation, sometimes quite complex, quering a separate table | Very simple, quering the main table | -| Can work correctly with ReplacingMergeTree as a source | Yes | No | No | -| Can work correctly with CollapsingMergeTree as a source | Yes | For simple aggregations | For simple aggregations | -| Can be chained | Yes (Usually with DAGs / special scripts) | Yes (but may be not straightforward, and often is a bad idea) | No | -| Resources needed to calculate the increment | May be signigicant | Usually tiny | Usually tiny | +| Can work correctly with ReplacingMergeTree as a source | Yes | No | No | +| Can work correctly with CollapsingMergeTree as a source | Yes | For simple aggregations | For simple aggregations | +| Can be chained | Yes (Usually with DAGs / special scripts) | Yes (but may be not straightforward, and often is a bad idea) | No | +| Resources needed to calculate the increment | May be significant | Usually tiny | Usually tiny | diff --git a/content/en/altinity-kb-schema-design/row-level-deduplication.md b/content/en/altinity-kb-schema-design/row-level-deduplication.md index 2f26edac6b..cfa5cbad58 100644 --- a/content/en/altinity-kb-schema-design/row-level-deduplication.md +++ b/content/en/altinity-kb-schema-design/row-level-deduplication.md @@ -1,12 +1,12 @@ --- -title: "ClickHouse row-level deduplication" -linkTitle: "ClickHouse row-level deduplication" +title: "ClickHouse® row-level deduplication" +linkTitle: "ClickHouse® row-level deduplication" weight: 100 description: >- - ClickHouse row-level deduplication. + ClickHouse® row-level deduplication. --- -## ClickHouse row-level deduplication. 
+## ClickHouse® row-level deduplication. (Block level deduplication exists in Replicated tables, and is not the subject of that article). @@ -26,7 +26,7 @@ Approach 0. Make deduplication before ingesting data to ClickHouse + you have full control - extra coding and 'moving parts', storing some ids somewhere + clean and simple schema and selects in ClickHouse -! check if row exists in clickhouse before insert can give non-satisfing results if you use ClickHouse cluster (i.e. Replicated / Distributed tables) - due to eventual consistency. +! check if row exists in ClickHouse before insert can give non-satisfying results if you use ClickHouse cluster (i.e. Replicated / Distributed tables) - due to eventual consistency. Approach 1. Allow duplicates during ingestion. Remove them on SELECT level (by things like GROUP BY) + simple inserts @@ -44,7 +44,7 @@ Approach 2. Eventual deduplication using Replacing Approach 3. Eventual deduplication using Collapsing - complicated - can force you to use suboptimal primary key (which will guarantee record uniqueness) - - you need to store previous state of the record somewhere, or extract it before ingestion from clickhouse + - you need to store previous state of the record somewhere, or extract it before ingestion from ClickHouse - deduplication is eventual (same as with Replacing) + you can make the proper aggregations of last state w/o FINAL (bookkeeping-alike sums, counts etc) diff --git a/content/en/altinity-kb-schema-design/snowflakeid.md b/content/en/altinity-kb-schema-design/snowflakeid.md new file mode 100644 index 0000000000..320800a42d --- /dev/null +++ b/content/en/altinity-kb-schema-design/snowflakeid.md @@ -0,0 +1,123 @@ +--- +title: "SnowflakeID for Efficient Primary Keys " +linkTitle: "SnowflakeID" +weight: 100 +description: >- + SnowflakeID for Efficient Primary Keys +--- + +In data warehousing (DWH) environments, the choice of primary key (PK) can significantly impact performance, particularly in terms of RAM usage 
and query speed. This is where [SnowflakeID](https://en.wikipedia.org/wiki/Snowflake_ID) comes into play, providing a robust solution for PK management. Here’s a deep dive into why and how Snowflake IDs are beneficial and practical implementation examples. + +### Why Snowflake ID? + +- **Natural IDs Suck**: Natural keys derived from business data can lead to various issues like complexity and instability. Surrogate keys, on the other hand, are system-generated and stable. +- Surrogate keys simplify joins and indexing, which is crucial for performance in large-scale data warehousing. +- Monotonic or sequential IDs help maintain the order of entries, which is essential for performance tuning and efficient range queries. +- Having both a timestamp and a unique ID in the same column allows for fast filtering of rows during SELECT operations. This is particularly useful for time-series data. + +### **Building Snowflake IDs** + +There are two primary methods to construct the lower bits of a Snowflake ID: + +1. **Hash of Important Columns**: + + Using a hash function on significant columns ensures uniqueness and distribution. + +2. **Row Number in insert batch** + + Utilizing the row number within data blocks provides a straightforward approach to generating unique identifiers. + + +### **Implementation as UDF** + +Here’s how to implement Snowflake IDs using standard SQL functions while utilizing second and millisecond timestamps. 
Pack hash to lower 22 bits for DateTime64 and 32bits for DateTime + +```sql +create function toSnowflake64 as (dt,ch) -> + bitOr(dateTime64ToSnowflakeID(dt), + bitAnd(bitAnd(ch,0x3FFFFF)+ + bitAnd(bitShiftRight(ch, 20),0x3FFFFF)+ + bitAnd(bitShiftRight(ch, 40),0x3FFFFF), + 0x3FFFFF) + ); + +create function toSnowflake as (dt,ch) -> + bitOr(dateTimeToSnowflakeID(dt), + bitAnd(bitAnd(ch,0xFFFFFFFF)+ + bitAnd(bitShiftRight(ch, 32),0xFFFFFFFF), + 0xFFFFFFFF) + ); + +with cityHash64('asdfsdnfs;n') as ch, + now64() as dt +select dt, + hex(toSnowflake64(dt,ch) as sn) , + snowflakeIDToDateTime64(sn); + +with cityHash64('asdfsdnfs;n') as ch, + now() as dt +select dt, + hex(toSnowflake(dt,ch) as sn) , + snowflakeIDToDateTime(sn); +``` + +### **Creating Tables with Snowflake ID** + +**Using Materialized Columns and hash** + +```sql +create table XX +( + id Int64 materialized toSnowflake(now(),cityHash64(oldID)), + oldID String, + data String +) engine=MergeTree order by id; + +``` + +Note: Using User-Defined Functions (UDFs) in CREATE TABLE statements is not always useful, as they expand to create table DDL, and changing them is inconvenient. + +**Using a Null Table, Materialized View, and** rowNumberInAllBlocks + +A more efficient approach involves using a Null table and materialized views. + +```sql +create table XX +( + id Int64, + data String +) engine=MergeTree order by id; + +create table Null (data String) engine=Null; +create materialized view _XX to XX as +select toSnowflake(now(),rowNumberInAllBlocks()) as id, data +from Null; +``` + +### Converting from UUID to SnowFlakeID for subsequent events + +Consider that your event stream only has a UUID column identifying a particular user. Registration time that can be used as a base for SnowFlakeID is presented only in the first ‘register’ event, but not in subsequent events. 
It’s easy to generate SnowFlakeID for the register event, but next, we need to get it from some other table without disturbing the ingestion process too much. Using Hash JOINs in Materialized Views is not recommended, so we need some “nested loop join” to get data fast. In Clickhouse, the “nested loop join” is still not supported, but Direct Dictionary can work around it. + +```sql +CREATE TABLE UUID2ID_store (user_id UUID, id UInt64) +ENGINE = MergeTree() -- EmbeddedRocksDB can be used instead +ORDER BY user_id +settings index_granularity=256; + +CREATE DICTIONARY UUID2ID_dict (user_id UUID, id UInt64) +PRIMARY KEY user_id +LAYOUT ( DIRECT ()) +SOURCE(CLICKHOUSE(TABLE 'UUID2ID_store')); + +CREATE OR REPLACE FUNCTION UUID2ID AS (uuid) -> dictGet('UUID2ID_dict',id,uuid); + +CREATE MATERIALIZED VIEW _toUUID_store TO UUID2ID_store AS +select user_id, toSnowflake64(event_time, cityHash64(user_id)) as id +from Actions; +``` + +**Conclusion** + +Snowflake IDs provide an efficient mechanism for generating unique, monotonic primary keys, which are essential for optimizing query performance in data warehousing environments. By combining timestamps and unique identifiers, snowflake IDs facilitate faster row filtering and ensure stable, surrogate key generation. Implementing these IDs using SQL functions and materialized views ensures that your data warehouse remains performant and scalable. diff --git a/content/en/altinity-kb-schema-design/two-columns-indexing.md b/content/en/altinity-kb-schema-design/two-columns-indexing.md new file mode 100644 index 0000000000..9fa87c0538 --- /dev/null +++ b/content/en/altinity-kb-schema-design/two-columns-indexing.md @@ -0,0 +1,122 @@ +--- +title: "Two columns indexing" +linkTitle: "Two columns indexing" +weight: 100 +description: >- + How to create ORDER BY suitable for filtering over two different columns in two different queries +--- + +Suppose we have telecom CDR data in which A party calls B party. 
Each data row consists of A party details: event_timestamp, A MSISDN , A IMEI, A IMSI , A start location, A end location , B MSISDN, B IMEI, B IMSI , B start location, B end location, and some other metadata. + +Searches will use one of the A or B fields, for example, A IMSI, within the start and end time window. + +A msisdn, A imsi, A imei values are tightly coupled as users rarely change their phones. + + +The queries will be: + +```sql +select * from X where A = '0123456789' and ts between ...; +select * from X where B = '0123456789' and ts between ...; +``` + +and both A & B are high-cardinality values + +ClickHouse® primary skip index (ORDER BY/PRIMARY KEY) works great when you always include leading ORDER BY columns in the WHERE filter. There are exceptions for low-cardinality columns and high-correlated values, but here is another case. A & B both have high cardinality, and it seems that their correlation is at a medium level. + +Various solutions exist, and their effectiveness largely depends on the correlation of different column data. Testing all solutions on actual data is necessary to select the best one. + + +### ORDER BY + additional Skip Index + +```sql +create table X ( + A UInt32, + B UInt32, + ts DateTime, + .... + INDEX ix_B (B) type minmax GRANULARITY 3 +) engine = MergeTree +partition by toYYYYMM(ts) +order by (toStartOfDay(ts),A,B); +``` + +bloom_filter index type instead of min_max could work fine in some situations. + +### Inverted index as a projection + +```sql +create table X ( + A UInt32, + B UInt32, + ts DateTime, + .... + PROJECTION ix_B ( + select A, B,ts ORDER BY B, ts + ) +) engine = MergeTree +partition by toYYYYMM(ts) +order by (toStartOfDay(ts),A,B); + +select * from X +where A in (select A from X where B='....' and ts between ...) + and B='...' and ts between ... ; +``` + +- The number of rows the subquery returns should not be very high. 1M rows seems to be a suitable limit. 
+- A separate table with a Materialized View can also be used similarly. +- accessing pattern for the main table will "point", so better to lower index_granularity to 256. That will increase RAM usage by Primary Key + + +### mortonEncode +(available from 23.10) + +Do not prioritize either A or B, but distribute indexing efficiency between them. + + * https://github.com/ClickHouse/ClickHouse/issues/41195 + * https://www.youtube.com/watch?v=5GR1J4T4_d8 + * https://clickhouse.com/docs/en/operations/settings/settings#analyze_index_with_space_filling_curves + +```sql +create table X ( + A UInt32, + B UInt32, + ts DateTime, + .... +) engine = MergeTree +partition by toYYYYMM(ts) +order by (toStartOfDay(ts),mortonEncode(A,B)); +select * from X where A = '0123456789' and ts between ...; +select * from X where B = '0123456789' and ts between ...; +``` + +### mortonEncode with non-UInt columns + +[mortonEncode](https://clickhouse.com/docs/en/sql-reference/functions/encoding-functions#mortonencode) function requires UInt columns, but sometimes different column types are needed (like String or ipv6). In such a case, the cityHash64() function can be used both for inserting and querying: + +```sql +create table X ( + A IPv6, + B IPv6, + AA alias cityHash64(A), + BB alias cityHash64(B), + ts DateTime materialized now() +) engine = MergeTree +partition by toYYYYMM(ts) +order by +(toStartOfDay(ts),mortonEncode(cityHash64(A),cityHash64(B))) +; + +insert into X values ('fd7a:115c:a1e0:ab12:4843:cd96:624c:9a17','fd7a:115c:a1e0:ab12:4843:cd96:624c:9a17') + +select * from X where cityHash64(toIPv6('fd7a:115c:a1e0:ab12:4843:cd96:624c:9a17')) = AA; +``` + +### hilbertEncode as alternative +(available from 24.6) + +[hilbertEncode](https://clickhouse.com/docs/en/sql-reference/functions/encoding-functions#hilbertencode) can be used instead of mortonEncode. On some data it allows better results than mortonEncode. 
+ + + + diff --git a/content/en/altinity-kb-schema-design/uniq-functions.md b/content/en/altinity-kb-schema-design/uniq-functions.md index 96b294845f..469467040e 100644 --- a/content/en/altinity-kb-schema-design/uniq-functions.md +++ b/content/en/altinity-kb-schema-design/uniq-functions.md @@ -46,3 +46,38 @@ do printf "|%s|%s,%s,%s,%s\n" "$f1" "$f2" "$size" "$result" "$time" done ``` + + +## groupBitmap + +Use [Roaring Bitmaps](https://roaringbitmap.org/) underneath. +Return amount of uniq values. + +Can be used with Int* types +Works really great when your values quite similar. (Low memory usage / state size) + +Example with blockchain data, block_number is monotonically increasing number. + +```sql +SELECT groupBitmap(block_number) FROM blockchain; + +┌─groupBitmap(block_number)─┐ +│ 48478157 │ +└───────────────────────────┘ + +MemoryTracker: Peak memory usage (for query): 64.44 MiB. +1 row in set. Elapsed: 32.044 sec. Processed 4.77 billion rows, 38.14 GB (148.77 million rows/s., 1.19 GB/s.) + +SELECT uniqExact(block_number) FROM blockchain; + +┌─uniqExact(block_number)─┐ +│ 48478157 │ +└─────────────────────────┘ + +MemoryTracker: Peak memory usage (for query): 4.27 GiB. +1 row in set. Elapsed: 70.058 sec. Processed 4.77 billion rows, 38.14 GB (68.05 million rows/s., 544.38 MB/s.) +``` + + + + diff --git a/content/en/altinity-kb-setup-and-maintenance/_index.md b/content/en/altinity-kb-setup-and-maintenance/_index.md index 1c3df79b50..5184ed2241 100644 --- a/content/en/altinity-kb-setup-and-maintenance/_index.md +++ b/content/en/altinity-kb-setup-and-maintenance/_index.md @@ -7,6 +7,6 @@ keywords: - monitor clickhouse - data migration description: > - Learn how to set up, deploy, monitor, and backup ClickHouse with step-by-step guides. + Learn how to set up, deploy, monitor, and backup ClickHouse® with step-by-step guides. 
weight: 5 --- diff --git a/content/en/altinity-kb-setup-and-maintenance/alters.md b/content/en/altinity-kb-setup-and-maintenance/alters.md new file mode 100644 index 0000000000..85f8b90627 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/alters.md @@ -0,0 +1,146 @@ +--- +title: "How ALTERs work in ClickHouse®" +linkTitle: "How ALTERs work in ClickHouse®" +weight: 100 +description: >- + +--- + +### How ALTERs work in ClickHouse®: + +#### ADD (COLUMN/INDEX/PROJECTION) + +Lightweight, will only change table metadata. +So new entity will be added in case of creation of new parts during INSERT's OR during merges of old parts. + +In case of COLUMN, ClickHouse will calculate column value on fly in query context. + +{{% alert title="Warning" color="warning" %}} + +```sql +CREATE TABLE test_materialization +( + `key` UInt32, + `value` UInt32 +) +ENGINE = MergeTree +ORDER BY key; + +INSERT INTO test_materialization(key, value) SELECT 1, 1; +INSERT INTO test_materialization(key, value) SELECT 2, 2; + +ALTER TABLE test_materialization ADD COLUMN inserted_at DateTime DEFAULT now(); + +SELECT key, inserted_at FROM test_materialization; + +┌─key─┬─────────inserted_at─┐ +│ 1 │ 2022-09-01 03:28:58 │ +└─────┴─────────────────────┘ +┌─key─┬─────────inserted_at─┐ +│ 2 │ 2022-09-01 03:28:58 │ +└─────┴─────────────────────┘ + +SELECT key, inserted_at FROM test_materialization; + +┌─key─┬─────────inserted_at─┐ +│ 1 │ 2022-09-01 03:29:11 │ +└─────┴─────────────────────┘ +┌─key─┬─────────inserted_at─┐ +│ 2 │ 2022-09-01 03:29:11 │ +└─────┴─────────────────────┘ + +Each query will return different inserted_at value, because each time now() function being executed. 
+ + +INSERT INTO test_materialization(key, value) SELECT 3, 3; + +SELECT key, inserted_at FROM test_materialization; + +┌─key─┬─────────inserted_at─┐ +│ 3 │ 2022-09-01 03:29:36 │ -- < This value was materialized during ingestion, that's why it's smaller than value for keys 1 & 2 +└─────┴─────────────────────┘ +┌─key─┬─────────inserted_at─┐ +│ 1 │ 2022-09-01 03:29:53 │ +└─────┴─────────────────────┘ +┌─key─┬─────────inserted_at─┐ +│ 2 │ 2022-09-01 03:29:53 │ +└─────┴─────────────────────┘ + +OPTIMIZE TABLE test_materialization FINAL; + +SELECT key, inserted_at FROM test_materialization; + +┌─key─┬─────────inserted_at─┐ +│ 1 │ 2022-09-01 03:30:52 │ +│ 2 │ 2022-09-01 03:30:52 │ +│ 3 │ 2022-09-01 03:29:36 │ +└─────┴─────────────────────┘ + +SELECT key, inserted_at FROM test_materialization; + +┌─key─┬─────────inserted_at─┐ +│ 1 │ 2022-09-01 03:30:52 │ +│ 2 │ 2022-09-01 03:30:52 │ +│ 3 │ 2022-09-01 03:29:36 │ +└─────┴─────────────────────┘ + +So, data inserted after addition of column can have lower inserted_at value then old data without materialization. + +``` + +{{% /alert %}} + +If you want to backpopulate data for old parts, you have multiple options: + +#### MATERIALIZE (COLUMN/INDEX/PROJECTION) (PART[ITION ID] '') + +Will materialize this entity. + +#### OPTIMIZE TABLE xxxx (PART[ITION ID] '') (FINAL) + +Will trigger merge, which will lead to materialization of all entities in affected parts. + +#### ALTER TABLE xxxx UPDATE column_name = column_name WHERE 1; + +Will trigger mutation, which will materialize this column. + +#### DROP (COLUMN/INDEX/PROJECTION) + +Lightweight, it's only about changing of table metadata and removing corresponding files from filesystem. +For Compact parts it will trigger merge, which can be heavy. [issue](https://github.com/ClickHouse/ClickHouse/issues/27502) + + +#### MODIFY COLUMN (DATE TYPE) + +1. Change column type in table schema. +2. Schedule mutation to change type for old parts. 
+ + +### Mutations + +Affected parts - parts with rows matching condition. + +#### ALTER TABLE xxxxx DELETE WHERE column_1 = 1; + +1. Will overwrite all column data in affected parts. +2. For all part(ition)s will create new directories on disk and write new data to them or create hardlinks if they untouched. +3. Register new parts names in ZooKeeper. + +#### ALTER TABLE xxxxx DELETE IN PARTITION ID '' WHERE column_1 = 1; + +Will do the same but only for specific partition. + +#### ALTER TABLE xxxxx UPDATE SET column_2 = column_2, column_3 = column_3 WHERE column_1 = 1; + +1. Will overwrite column_2, column_3 data in affected parts. +2. For all part(ition)s will create new directories on disk and write new data to them or create hardlinks if they untouched. +3. Register new parts names in ZooKeeper. + +#### DELETE FROM xxxxx WHERE column_1 = 1; + +1. Will create & populate hidden boolean column in affected parts. (_row_exists column) +2. For all part(ition)s will create new directories on disk and write new data to them or create hardlinks if they untouched. +3. Register new parts names in ZooKeeper. + +Despite that LWD mutations will not rewrite all columns, steps 2 & 3 in case of big tables can take significant time. + diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-aggressive_merges.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-aggressive_merges.md new file mode 100644 index 0000000000..ead7d010a0 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-aggressive_merges.md @@ -0,0 +1,69 @@ +--- +title: "Aggressive merges" +linkTitle: "Aggressive merges" +description: > + Aggressive merges +--- + + + +Q: Is there any way I can dedicate more resources to the merging process when running ClickHouse® on pretty beefy machines (like 36 cores, 1TB of RAM, and large NVMe disks)? + + +Mostly such things doing by changing the level of parallelism: + + 1.  
`background_pool_size` - how many threads will be actually doing the merge (if you can push all the server resources to do the merges, i.e. no selects will be running - you can give all the cores to that, so try increasing to 36). If you use replicated table - use the same value for `max_replicated_merges_in_queue`.
+
+ 2.  `background_merges_mutations_concurrency_ratio` - how many merges will be assigned (multiplier of background_pool_size), sometimes the default (2) may work against you since it will assign smaller merges, which is nice if you need to deal with real-time inserts, but is not important if you do bulk inserts and later start a lot of merges. So I would try 1.
+
+ 3. `number_of_free_entries_in_pool_to_lower_max_size_of_merge` (merge_tree setting) should be changed together with background_pool_size (50-90% of that). "When there is less than a specified number of free entries in the pool (or replicated queue), start to lower the maximum size of the merge to process (or to put in the queue). This is to allow small merges to process - not filling the pool with long-running merges." To make it really aggressive try 90-95% of background_pool_size, for ex. 34 (so you will have 34 huge merges and 2 small ones).
+
+Additionally, you can:
+
+ - control how big target parts will be created by the merges (max_bytes_to_merge_at_max_space_in_pool)
+ - disable direct io for big merges (min_merge_bytes_to_use_direct_io) - direct io is often slower (it bypasses the page cache, and it is used there to prevent pushing out the often used data from the cache by the running merge).
+ - on a replicated system with slow merges and a fast network you can use execute_merges_on_single_replica_time_threshold
+ - analyze if the Vertical or Horizontal merge is better / faster for your case/schema. 
(Vertical first merges the columns from the table ORDER BY and then other columns one by another - that normally requires less ram, and keeps fewer files opened, but requires more complex computations compared to horizontal when all columns are merged simultaneously).
+ - if you have a lot of tables - you can also give more resources to the scheduler (the component which assigns the merges, and do some housekeeping) - background_schedule_pool_size & background_common_pool_size
+ - review the schema, especially codecs/compression used (they allow to reduce the size, but often can impact the merge speed significantly).
+ - try to form bigger parts when doing inserts (min_insert_block_size_bytes / min_insert_block_size_rows / max_insert_block_size)
+ - check if wide (every column in a separate file) or compact (columns are mixed in one file) parts are used (system.parts). By default min_bytes_for_wide_part=10 mln rows (so if the part is bigger than that the wide format will be used, compact otherwise). Sometimes it can be beneficial to use a compact format even for bigger parts (a lot of relatively small columns) or oppositely - use a wide format even for small parts (few fat columns in the table).
+ - consider using recent ClickHouse releases - they use compressed marks by default, which can be beneficial for reducing the i/o
+
+All the adjustments/performance optimizations should be controlled by some reproducible 'benchmark' so you can control/prove that the change gives the expected result (sometimes it's quite hard to predict the impact of some change on the real system). Please also monitor how system resources (especially CPU, IO + for replicated tables: network & zookeeper) are used/saturated during the test. Also monitor/plot the usage of the pools:
+```
+select * from system.metrics where metric like '%PoolTask'
+```
+
+Those recommendations are NOT generic. 
For systems with real-time insert & select pressure happening together with merges - those adjustments can be 'too aggressive'. So if you have different setups with different usage patterns - avoid using the same 'aggressive' settings template for all of them. + +TL/DR version: + +``` +cat /etc/clickhouse-server/config.d/aggresive_merges.xml + + 36 + 128 + 8 + 1 + + 32 + 36 + 161061273600 + 10737418240 + + + +cat /etc/clickhouse-server/users.d/aggresive_merges.xml + + + +36 +128 +8 +1 + + + +``` + diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-check-replication-ddl-queue.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-check-replication-ddl-queue.md new file mode 100644 index 0000000000..4c505878ef --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-check-replication-ddl-queue.md @@ -0,0 +1,407 @@ +--- +title: "ClickHouse® Replication problems" +linkTitle: "Replication problems" +description: > + Finding and troubleshooting problems in the `replication_queue` +keywords: + - clickhouse replication + - clickhouse check replication status +--- + +# Common problems & solutions + +- If the replication queue does not have any Exceptions only postponed reasons without exceptions just leave ClickHouse® do Merges/Mutations and it will eventually catch up and reduce the number of tasks in `replication_queue`. Number of concurrent merges and fetches can be tuned but if it is done without an analysis of your workload then you may end up in a worse situation. If Delay in queue is going up actions may be needed: + +- First simplest approach: + try to `SYSTEM RESTART REPLICA db.table` (This will DETACH/ATTACH table internally) + + + +# How to check for replication problems + +1. Check `system.replicas` first, cluster-wide. It allows to check if the problem is local to some replica or global, and allows to see the exception. + allows to answer the following questions: + - Are there any ReadOnly replicas? 
+ - Is there the connection to zookeeper active? + - Is there the exception during table init? (`Code: 999. Coordination::Exception: Transaction failed (No node): Op #1`) + +2. Check `system.replication_queue`. + - How many tasks there / are they moving / are there some very old tasks there? (check `created_time` column, if tasks are 24h old, it is a sign of a problem): + - You can use this qkb article query: https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-replication-queue/ + - Check if there are tasks with a high number of `num_tries` or `num_postponed` and `postponed_reason` this is a sign of stuck tasks. + - Check the problematic parts affecting the stuck tasks. You can use columns `new_part_name` or `parts_to_merge` + - Check which type is the task. If it is `MUTATE_PART` then it is a mutation task. If it is `MERGE_PARTS` then it is a merge task. These tasks can be deleted from the replication queue but `GET_PARTS` should not be deleted. + +3. Check `system.errors` + +4. Check `system.mutations`: + - You can check that in the replication queue are stuck tasks of type `MUTATE_PART`, and that those mutations are still executing `system.mutations` using column `is_done` + +5. Find the moment when the problem started and collect/analyze / preserve logs from that moment. It is usually during the first steps of a restart/crash + +6. Use `part_log` and `system.parts` to gather information of the parts related with the stuck tasks in the replication queue: + - Check if those parts exist and are active from `system.parts` (use partition_id, name as part and active columns to filter) + - Extract the part history from `system.part_log` + - Example query from `part_log`: + +```sql +SELECT hostName(), * FROM +cluster('all-sharded',system.part_log) +WHERE + hostName() IN ('chi-prod-live-2-0-0','chi-prod-live-2-2-0','chi-prod-live-2-1-0') + AND table = 'sessions_local' + AND database = 'analytics' + AND part_name in ('20230411_33631_33654_3') +``` + +7. 
If there are no errors, just everything gets slower - check the load (usual system metrics)
+
+
+
+## Some stuck replication task for a partition that was already removed or has no data
+
+- This can be easily detected because some exceptions will be in the replication queue that reference a part from a partition that does not exist. Here the most probable scenario is that the partition was dropped and some tasks were left in the queue.
+
+- drop the partition manually once again (it should remove the task)
+
+- If the partition exists but the part is missing (maybe because it is superseded by a newer merged part) then you can try to DETACH/ATTACH the partition.
+- Below DML generates the ALTER commands to do this:
+
+```sql
+WITH
+ extract(new_part_name, '^[^_]+') as partition_id
+SELECT
+ '/* count: ' || count() || ' */\n' ||
+ 'ALTER TABLE ' || database || '.' || table || ' DETACH PARTITION ID \''|| partition_id || '\';\n' ||
+ 'ALTER TABLE ' || database || '.' || table || ' ATTACH PARTITION ID \''|| partition_id || '\';\n'
+FROM
+ system.replication_queue as rq
+GROUP BY
+ database, table, partition_id
+HAVING sum(num_tries) > 1000 OR count() > 100
+ORDER BY count() DESC, sum(num_tries) DESC
+FORMAT TSVRaw;
+```
+
+## Problem with mutation stuck in the queue
+
+- This can happen if the mutation is finished and, for some reason, the task is not removed from the queue. This can be detected by checking `system.mutations` table and seeing if the mutation is done, but the task is still in the queue. 
+ +- kill the mutation (again) + +## Replica is not starting because local set of files differs too much + +- First try increase the thresholds or set flag `force_restore_data` flag and restarting clickhouse/pod https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication#recovery-after-complete-data-loss + +## Replica is in Read-Only MODE + +Sometimes, due to crashes, zookeeper unavailability, slowness, or other reasons, some of the tables can be in Read-Only mode. This allows SELECTS but not INSERTS. So we need to do DROP / RESTORE replica procedure. + +Just to be clear, this procedure **will not delete any data**, it will just re-create the metadata in zookeeper with the current state of the [ClickHouse replica](/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/add_remove_replica/). + +How it works: + +```sql +ALTER TABLE table_name DROP DETACHED PARTITION ALL -- clean detached folder before operation. PARTITION ALL works only for the fresh clickhouse versions +DETACH TABLE table_name; -- Required for DROP REPLICA +-- Use the zookeeper_path and replica_name from system.replicas. +SYSTEM DROP REPLICA 'replica_name' FROM ZKPATH '/table_path_in_zk'; -- It will remove everything from the /table_path_in_zk/replicas/replica_name +ATTACH TABLE table_name; -- Table will be in readonly mode, because there is no metadata in ZK and after that execute +SYSTEM RESTORE REPLICA table_name; -- It will detach all partitions, re-create metadata in ZK (like it's new empty table), and then attach all partitions back +SYSTEM SYNC REPLICA table_name; -- Not mandatory. It will Wait for replicas to synchronize parts. Also it's recommended to check `system.detached_parts` on all replicas after recovery is finished. +SELECT name FROM system.detached_parts WHERE table = 'table_name'; -- check for leftovers. 
See the potential problems here https://altinity.com/blog/understanding-detached-parts-in-clickhouse
+```
+
+Starting from version 23, it's possible to use syntax [SYSTEM DROP REPLICA \'replica_name\' FROM TABLE db.table](https://clickhouse.com/docs/en/sql-reference/statements/system#drop-replica) instead of the `ZKPATH` variant, but you need to execute the above command from a different replica than the one you want to drop, which is not convenient sometimes. We recommend using the above method because it works with any version and is more reliable.
+
+## Procedure to restore multiple tables in Read-Only mode per replica
+
+It is better to make an approach per replica, because restoring a replica using ON CLUSTER could lead to race conditions that would cause errors and a big stress in zookeeper/keeper.
+
+
+```sql
+SELECT
+ '-- Table ' || toString(row_num) || '\n' ||
+ 'DETACH TABLE `' || database || '`.`' || table || '`;\n' ||
+ 'SYSTEM DROP REPLICA ''' || replica_name || ''' FROM ZKPATH ''' || zookeeper_path || ''';\n' ||
+ 'ATTACH TABLE `' || database || '`.`' || table || '`;\n' ||
+ 'SYSTEM RESTORE REPLICA `' || database || '`.`' || table || '`;\n'
+FROM (
+ SELECT
+ *,
+ rowNumberInAllBlocks() + 1 as row_num
+ FROM (
+ SELECT
+ database,
+ table,
+ any(replica_name) as replica_name,
+ any(zookeeper_path) as zookeeper_path
+ FROM system.replicas
+ WHERE is_readonly
+ GROUP BY database, table
+ ORDER BY database, table
+ )
+ ORDER BY database, table
+)
+FORMAT TSVRaw;
+```
+
+This will generate the DDL statements to be executed per replica and generate an output that can be saved as an SQL file. It is important to execute the commands per replica in the sequence generated by the above DDL:
+
+- DETACH the table
+- DROP REPLICA
+- ATTACH the table
+- RESTORE REPLICA
+
+If we do this in parallel a table could still be attaching while another query is dropping/restoring the replica in zookeeper, causing errors. 
+ +The following bash script will read the generated SQL file and execute the commands sequentially, asking for user input in case of errors. Simply save the generated SQL to a file (e.g. `recovery_commands.sql`) and run the script below (that you can name as `clickhouse_replica_recovery.sh`): + +```bash +$ clickhouse_replica_recovery.sh recovery_commands.sql +``` + + +Here the script: + +```bash +#!/bin/bash + +# ClickHouse Replica Recovery Script +# This script executes DETACH, DROP REPLICA, ATTACH, and RESTORE REPLICA commands sequentially + +# Configuration +CLICKHOUSE_HOST="${CLICKHOUSE_HOST:-localhost}" +CLICKHOUSE_PORT="${CLICKHOUSE_PORT:-9000}" +CLICKHOUSE_USER="${CLICKHOUSE_USER:-clickhouse_operator}" +CLICKHOUSE_PASSWORD="${CLICKHOUSE_PASSWORD:-xxxxxxxxx}" +COMMANDS_FILE="${1:-recovery_commands.sql}" +LOG_FILE="recovery_$(date +%Y%m%d_%H%M%S).log" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +MAGENTA='\033[0;35m' +NC='\033[0m' # No Color + +# Function to log messages +log() { + echo -e "[$(date '+%Y-%m-%d %H:%M:%S')] $1" | tee -a "$LOG_FILE" +} + +# Function to execute a SQL statement with retry logic +execute_sql() { + local sql="$1" + local table_num="$2" + local step_name="$3" + + while true; do + log "${YELLOW}Executing command for Table $table_num - $step_name:${NC}" + log "$sql" + + # Build clickhouse-client command + local ch_cmd="clickhouse-client --host=$CLICKHOUSE_HOST --port=$CLICKHOUSE_PORT --user=$CLICKHOUSE_USER" + + if [ -n "$CLICKHOUSE_PASSWORD" ]; then + ch_cmd="$ch_cmd --password=$CLICKHOUSE_PASSWORD" + fi + + # Execute the command and capture output and exit code + local output + local exit_code + output=$(echo "$sql" | $ch_cmd 2>&1) + exit_code=$? 
+ + # Log the output + echo "$output" | tee -a "$LOG_FILE" + + if [ $exit_code -eq 0 ]; then + log "${GREEN}✓ Successfully executed${NC}" + return 0 + else + log "${RED}✗ Failed to execute (Exit code: $exit_code)${NC}" + log "${RED}Error output: $output${NC}" + + # Ask user what to do + while true; do + echo "" + log "${MAGENTA}========================================${NC}" + log "${MAGENTA}Error occurred! Choose an option:${NC}" + log "${MAGENTA}========================================${NC}" + echo -e "${YELLOW}[R]${NC} - Retry this command" + echo -e "${YELLOW}[I]${NC} - Ignore this error and continue to next command in this table" + echo -e "${YELLOW}[S]${NC} - Skip this entire table and move to next table" + echo -e "${YELLOW}[A]${NC} - Abort script execution" + echo "" + echo -n "Enter your choice (R/I/S/A): " + + # Read from /dev/tty to get user input from terminal + read -r response < /dev/tty + + case "${response^^}" in + R|RETRY) + log "${BLUE}Retrying command...${NC}" + break # Break inner loop to retry + ;; + I|IGNORE) + log "${YELLOW}Ignoring error and continuing to next command...${NC}" + return 1 # Return error but continue + ;; + S|SKIP) + log "${YELLOW}Skipping entire table $table_num...${NC}" + return 2 # Return special code to skip table + ;; + A|ABORT) + log "${RED}Aborting script execution...${NC}" + exit 1 + ;; + *) + echo -e "${RED}Invalid option '$response'. Please enter R, I, S, or A.${NC}" + ;; + esac + done + fi + done +} + +# Main execution function +main() { + log "${BLUE}========================================${NC}" + log "${BLUE}ClickHouse Replica Recovery Script${NC}" + log "${BLUE}========================================${NC}" + log "Host: $CLICKHOUSE_HOST:$CLICKHOUSE_PORT" + log "User: $CLICKHOUSE_USER" + log "Commands file: $COMMANDS_FILE" + log "Log file: $LOG_FILE" + echo "" + + # Check if commands file exists + if [ ! 
-f "$COMMANDS_FILE" ]; then + log "${RED}Error: Commands file '$COMMANDS_FILE' not found!${NC}" + echo "" + echo "Usage: $0 [commands_file]" + echo " commands_file: Path to SQL commands file (default: recovery_commands.sql)" + echo "" + echo "Example: $0 my_commands.sql" + exit 1 + fi + + # Process SQL commands from file + local current_sql="" + local table_counter=0 + local step_in_table=0 + local failed_count=0 + local success_count=0 + local ignored_count=0 + local skipped_tables=() + local skip_current_table=false + + while IFS= read -r line || [ -n "$line" ]; do + # Skip empty lines + if [[ -z "$line" ]] || [[ "$line" =~ ^[[:space:]]*$ ]]; then + continue + fi + + # Check if this is a comment line indicating a new table + if [[ "$line" =~ ^[[:space:]]*--[[:space:]]*Table[[:space:]]+([0-9]+) ]]; then + table_counter="${BASH_REMATCH[1]}" + step_in_table=0 + skip_current_table=false + log "" + log "${BLUE}========================================${NC}" + log "${BLUE}Processing Table $table_counter${NC}" + log "${BLUE}========================================${NC}" + continue + elif [[ "$line" =~ ^[[:space:]]*-- ]]; then + # Skip other comment lines + continue + fi + + # Skip if we're skipping this table + if [ "$skip_current_table" = true ]; then + # Check if line ends with semicolon to count statements + if [[ "$line" =~ \;[[:space:]]*$ ]]; then + step_in_table=$((step_in_table + 1)) + fi + continue + fi + + # Accumulate the SQL statement + current_sql+="$line " + + # Check if we have a complete statement (ends with semicolon) + if [[ "$line" =~ \;[[:space:]]*$ ]]; then + step_in_table=$((step_in_table + 1)) + + # Determine the step name + local step_name="" + if [[ "$current_sql" =~ ^[[:space:]]*DETACH ]]; then + step_name="DETACH" + elif [[ "$current_sql" =~ ^[[:space:]]*SYSTEM[[:space:]]+DROP[[:space:]]+REPLICA ]]; then + step_name="DROP REPLICA" + elif [[ "$current_sql" =~ ^[[:space:]]*ATTACH ]]; then + step_name="ATTACH" + elif [[ "$current_sql" =~ 
^[[:space:]]*SYSTEM[[:space:]]+RESTORE[[:space:]]+REPLICA ]]; then + step_name="RESTORE REPLICA" + fi + + log "" + log "Step $step_in_table/4: $step_name" + + # Execute the statement + local result + execute_sql "$current_sql" "$table_counter" "$step_name" + result=$? + + if [ $result -eq 0 ]; then + success_count=$((success_count + 1)) + sleep 1 # Small delay between commands + elif [ $result -eq 1 ]; then + # User chose to ignore this error + failed_count=$((failed_count + 1)) + ignored_count=$((ignored_count + 1)) + sleep 1 + elif [ $result -eq 2 ]; then + # User chose to skip this table + skip_current_table=true + skipped_tables+=("$table_counter") + log "${YELLOW}Skipping remaining commands for Table $table_counter${NC}" + fi + + # Reset current_sql for next statement + current_sql="" + fi + done < "$COMMANDS_FILE" + + # Summary + log "" + log "${BLUE}========================================${NC}" + log "${BLUE}Execution Summary${NC}" + log "${BLUE}========================================${NC}" + log "Total successful commands: ${GREEN}$success_count${NC}" + log "Total failed commands: ${RED}$failed_count${NC}" + log "Total ignored errors: ${YELLOW}$ignored_count${NC}" + log "Total tables processed: $table_counter" + + if [ ${#skipped_tables[@]} -gt 0 ]; then + log "Skipped tables: ${YELLOW}${skipped_tables[*]}${NC}" + fi + + log "Log file: $LOG_FILE" + + if [ $failed_count -eq 0 ]; then + log "${GREEN}All commands executed successfully!${NC}" + exit 0 + else + log "${YELLOW}Some commands failed or were ignored. 
Please check the log file.${NC}" + exit 1 + fi +} + +# Run the main function +main + +``` + diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-clickhouse-in-docker.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-clickhouse-in-docker.md index cf6bc5bbaf..6bd19aa4d9 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-clickhouse-in-docker.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-clickhouse-in-docker.md @@ -1,14 +1,14 @@ --- -title: "ClickHouse in Docker" -linkTitle: "ClickHouse in Docker" +title: "ClickHouse® in Docker" +linkTitle: "ClickHouse® in Docker" description: > - ClickHouse in Docker + ClickHouse® in Docker --- ## Do you have documentation on Docker deployments? Check -* [https://hub.docker.com/r/yandex/clickhouse-server/](https://hub.docker.com/r/yandex/clickhouse-server/) +* [https://hub.docker.com/r/clickhouse/clickhouse-server](https://hub.docker.com/r/clickhouse/clickhouse-server) * [https://docs.altinity.com/clickhouseonkubernetes/](https://docs.altinity.com/clickhouseonkubernetes/) * sources of entry point - [https://github.com/ClickHouse/ClickHouse/blob/master/docker/server/entrypoint.sh](https://github.com/ClickHouse/ClickHouse/blob/master/docker/server/entrypoint.sh) @@ -21,7 +21,7 @@ Important things: * Also, you may mount in some files or folders in the configuration folder: * `/etc/clickhouse-server/config.d/listen_ports.xml` * `--ulimit nofile=262144:262144` -* You can also set on some linux capabilities to enable some of extra features of ClickHouse (not obligatory): `SYS_PTRACE NET_ADMIN IPC_LOCK SYS_NICE` +* You can also set on some linux capabilities to enable some of extra features of ClickHouse® (not obligatory): `SYS_PTRACE NET_ADMIN IPC_LOCK SYS_NICE` * you may also mount in the folder `/docker-entrypoint-initdb.d/` - all SQL or bash scripts there will be executed during container startup. 
* if you use cgroup limits - it may misbehave https://github.com/ClickHouse/ClickHouse/issues/2261 (set up `` manually) * there are several ENV switches, see: [https://github.com/ClickHouse/ClickHouse/blob/master/docker/server/entrypoint.sh](https://github.com/ClickHouse/ClickHouse/blob/master/docker/server/entrypoint.sh) @@ -40,7 +40,7 @@ docker run -d \ --cap-add=IPC_LOCK \ --cap-add=SYS_PTRACE \ --network=host \ - yandex/clickhouse-server:21.1.7 + clickhouse/clickhouse-server:latest docker exec -it some-clickhouse-server clickhouse-client docker exec -it some-clickhouse-server bash diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-converting-mergetree-to-replicated.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-converting-mergetree-to-replicated.md index 8e445d9b2f..ff72e62af3 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-converting-mergetree-to-replicated.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-converting-mergetree-to-replicated.md @@ -2,19 +2,22 @@ title: "Converting MergeTree to Replicated" linkTitle: "Converting MergeTree to Replicated" description: > - Converting MergeTree to Replicated + Adding replication to a table +keywords: + - clickhouse replicatedmergetree + - clickhouse replicated --- -Options here are: +To enable replication in a table that uses the `MergeTree` engine, you need to convert the engine to `ReplicatedMergeTree`. Options here are: -1. Use`INSERT INTO foo_replicated SELECT * FROM foo` . -2. Create table aside and attach all partition from the existing table then drop original table (uses hard links don't require extra disk space). `ALTER TABLE foo_replicated ATTACH PARTITION ID 'bar' FROM 'foo'` You can easily auto generate those commands using a query like: `SELECT DISTINCT 'ALTER TABLE foo_replicated ATTACH PARTITION ID \'' || partition_id || '\' FROM foo;' from system.parts WHERE table = 'foo';` +1. Use`INSERT INTO foo_replicated SELECT * FROM foo`. 
(suitable for small tables) +2. Create table aside and attach all partition from the existing table then drop original table (uses hard links don't require extra disk space). `ALTER TABLE foo_replicated ATTACH PARTITION ID 'bar' FROM 'foo'` You can easily auto generate those commands using a query like: `SELECT DISTINCT 'ALTER TABLE foo_replicated ATTACH PARTITION ID \'' || partition_id || '\' FROM foo;' from system.parts WHERE table = 'foo';` See [the example below](#example-for-option-2-above) for details. 3. Do it 'in place' using some file manipulation. see the procedure described here: [https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/replication/\#converting-from-mergetree-to-replicatedmergetree](https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/replication/#converting-from-mergetree-to-replicatedmergetree) -4. Do a backup of MergeTree and recover as ReplicatedMergeTree. [https://github.com/AlexAkulov/clickhouse-backup/blob/master/Examples.md\#how-to-convert-mergetree-to-replicatedmegretree](https://github.com/AlexAkulov/clickhouse-backup/blob/master/Examples.md#how-to-convert-mergetree-to-replicatedmegretree) -5. Embedded command for that should be added in future. +4. Do a backup of MergeTree and recover as ReplicatedMergeTree. [https://github.com/Altinity/clickhouse-backup/blob/master/Examples.md\#how-to-convert-mergetree-to-replicatedmegretree](https://github.com/Altinity/clickhouse-backup/blob/master/Examples.md#how-to-convert-mergetree-to-replicatedmegretree) +5. Embedded command for recent Clickhouse versions - https://clickhouse.com/docs/en/sql-reference/statements/attach#attach-mergetree-table-as-replicatedmergetree -## example for option 2 +## Example for option 2 above -Note: ATTACH PARTITION ID 'bar' FROM 'foo'` is practically free from compute and disk space perspective. 
This feature utilizes filesystem hard-links and the fact that files are immutable in Clickhouse ( it's the core of the Clickhouse design, filesystem hard-links and such file manipulations are widely used ). +Note: `ATTACH PARTITION ID 'bar' FROM 'foo'` is practically free from a compute and disk space perspective. This feature utilizes filesystem hard-links and the fact that files are immutable in ClickHouse® (it's the core of the ClickHouse design, filesystem hard-links and such file manipulations are widely used). ```sql create table foo( A Int64, D Date, S String ) diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/_index.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/_index.md index edc9521312..cdcfd679a9 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/_index.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/_index.md @@ -11,7 +11,7 @@ Pros: Cons: * Decoding & encoding of common data formats may be slower / require more CPU -* The data size is usually bigger than ClickHouse formats. +* The data size is usually bigger than ClickHouse® formats. * Some of the common data formats have limitations. {{% alert title="Info" color="info" %}} @@ -31,35 +31,12 @@ Pros: Cons: * Uses CPU / RAM (mostly on the receiver side) -See details in: +See details of both approaches in: [remote-table-function.md]({{}}) -## clickhouse-copier - -Pros: -* Possible to do **some** changes in schema. -* Needs only access to ClickHouse TCP port. -* It’s possible to change the distribution of data between shards. -* Suitable for large clusters: many clickhouse-copier can execute the same task together. - -Cons: -* May create an inconsistent result if source cluster data is changing during the process. -* Hard to setup. -* Requires zookeeper. 
-* Uses CPU / RAM (mostly on the clickhouse-copier and receiver side) +[distributed-table-cluster.md]({{}}) -{{% alert title="Info" color="info" %}} -Internally it works like smart `INSERT INTO cluster(…) SELECT * FROM ...` with some consistency checks. -{{% /alert %}} - -{{% alert title="Info" color="info" %}} -Run clickhouse copier on the same nodes as receiver clickhouse, to avoid doubling the network load. -{{% /alert %}} - -See details in: - -[altinity-kb-clickhouse-copier]({{}}) ## Manual parts moving: freeze / rsync / attach @@ -89,9 +66,9 @@ Cons: Just create the backup on server 1, upload it to server 2, and restore the backup. -See [https://github.com/AlexAkulov/clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup) +See [https://github.com/Altinity/clickhouse-backup](https://github.com/Altinity/clickhouse-backup) -[https://altinity.com/blog/introduction-to-clickhouse-backups-and-clickhouse-backup]("https://altinity.com/blog/introduction-to-clickhouse-backups-and-clickhouse-backup") +[https://altinity.com/blog/introduction-to-clickhouse-backups-and-clickhouse-backup](https://altinity.com/blog/introduction-to-clickhouse-backups-and-clickhouse-backup) ## Fetch from zookeeper path @@ -100,7 +77,7 @@ Pros: Cons: * Table schema should be the same. -* Works only when the source and the destination clickhouse servers share the same zookeeper (without chroot) +* Works only when the source and the destination ClickHouse servers share the same zookeeper (without chroot) * Needs to access zookeeper and ClickHouse replication ports: (`interserver_http_port` or `interserver_https_port`) ```sql @@ -108,21 +85,23 @@ ALTER TABLE table_name FETCH PARTITION partition_expr FROM 'path-in-zookeeper' ``` [alter table fetch detail]({{}}) -## Replication protocol +## Using the replication protocol by adding a new replica Just make one more replica in another place. Pros: * Simple to setup * Data is consistent all the time automatically. -* Low CPU and network usage. 
+* Low CPU and network usage should be tuned. Cons: * Needs to reach both zookeeper client (2181) and ClickHouse replication ports: (`interserver_http_port` or `interserver_https_port`) * In case of cluster migration, zookeeper need’s to be migrated too. -* Replication works both ways. +* Replication works both ways so new replica should be outside the main cluster. + +Check the details in: -[../altinity-kb-zookeeper/altinity-kb-zookeeper-cluster-migration.md](../altinity-kb-zookeeper/altinity-kb-zookeeper-cluster-migration.md) +[Add a replica to a Cluster]({{}}) ## See also diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/add_remove_replica.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/add_remove_replica.md new file mode 100644 index 0000000000..5d47f3efcd --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/add_remove_replica.md @@ -0,0 +1,277 @@ +--- +title: "Add/Remove a new replica to a ClickHouse® cluster" +linkTitle: "add_remove_replica" +description: > + How to add/remove a new ClickHouse replica manually and using `clickhouse-backup` +keywords: + - clickhouse replica + - clickhouse add replica + - clickhouse remove replica +--- + +## ADD nodes/replicas to a ClickHouse® cluster + +To add some ClickHouse® replicas to an existing cluster if -30TB then better to use replication: + +- don’t add the `remote_servers.xml` until replication is done. +- Add these files and restart to limit bandwidth and avoid saturation (70% total bandwidth): + +[Core Settings | ClickHouse Docs](https://clickhouse.com/docs/en/operations/settings/settings/#max_replicated_fetches_network_bandwidth_for_server) + +💡 Do the **Gbps to Bps** math correctly. 
For 10G —> 1250MB/s —> 1250000000 B/s and change `max_replicated_*` settings accordingly: + +- Nodes replicating from: + +```xml + + + + 50000 + + + +``` + +- Nodes replicating to: + +```xml + + + + 50000 + + + +``` + +### Manual method (DDL) + +- Create tables `manually` and be sure macros in all replicas are aligned with the ZK path. If zk path uses `{cluster}` then this method won’t work. ZK path should use `{shard}` and `{replica}` or `{uuid}` (if databases are Atomic) only. + +```sql +-- DDL for Databases +SELECT concat('CREATE DATABASE "', name, '" ENGINE = ', engine_full, ';') +FROM system.databases WHERE name NOT IN ('system', 'information_schema', 'INFORMATION_SCHEMA') +INTO OUTFILE '/tmp/databases.sql' +FORMAT TSVRaw; +-- DDL for tables and views +SELECT + replaceRegexpOne(replaceOne(concat(create_table_query, ';'), '(', 'ON CLUSTER \'{cluster}\' ('), 'CREATE (TABLE|DICTIONARY|VIEW|LIVE VIEW|WINDOW VIEW)', 'CREATE \\1 IF NOT EXISTS') +FROM + system.tables +WHERE engine != 'MaterializedView' and + database NOT IN ('system', 'information_schema', 'INFORMATION_SCHEMA') AND + create_table_query != '' AND + name NOT LIKE '.inner.%%' AND + name NOT LIKE '.inner_id.%%' +INTO OUTFILE '/tmp/schema.sql' AND STDOUT +FORMAT TSVRaw +SETTINGS show_table_uuid_in_table_create_query_if_not_nil=1; +--- DDL only for materialized views +SELECT + replaceRegexpOne(replaceOne(concat(create_table_query, ';'), 'TO', 'ON CLUSTER \'{cluster}\' TO'), '(CREATE MATERIALIZED VIEW)', '\\1 IF NOT EXISTS') +FROM + system.tables +WHERE engine = 'MaterializedView' and + database NOT IN ('system', 'information_schema', 'INFORMATION_SCHEMA') AND + create_table_query != '' AND + name NOT LIKE '.inner.%%' AND + name NOT LIKE '.inner_id.%%' AND + as_select != '' +INTO OUTFILE '/tmp/schema.sql' APPEND AND STDOUT +FORMAT TSVRaw +SETTINGS show_table_uuid_in_table_create_query_if_not_nil=1; +``` + +This will generate the UUIDs in the CREATE TABLE definition, something like this: + +```sql +CREATE 
TABLE IF NOT EXISTS default.insert_test UUID '51b41170-5192-4947-b13b-d4094c511f06' ON CLUSTER '{cluster}' (`id_order` UInt16, `id_plat` UInt32, `id_warehouse` UInt64, `id_product` UInt16, `order_type` UInt16, `order_status` String, `datetime_order` DateTime, `units` Int16, `total` Float32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}') PARTITION BY tuple() ORDER BY (id_order, id_plat, id_warehouse) SETTINGS index_granularity = 8192; +``` + +- Copy both SQL to destination replica and execute + +```sql +clickhouse-client --host localhost --port 9000 -mn < databases.sql +clickhouse-client --host localhost --port 9000 -mn < schema.sql +``` + +### Using `clickhouse-backup` + +- Using `clickhouse-backup` to copy the schema of a replica to another is also convenient and moreover if [using Atomic database](/engines/altinity-kb-atomic-database-engine/) with `{uuid}` macros in [ReplicatedMergeTree engines](https://www.youtube.com/watch?v=oHwhXc0re6k): + +```bash +sudo -u clickhouse clickhouse-backup create --schema --rbac --named-collections rbac_and_schema +# From the destination replica do this in 2 steps: +sudo -u clickhouse clickhouse-backup restore --rbac-only rbac_and_schema +sudo -u clickhouse clickhouse-backup restore --schema --named-collections rbac_and_schema + +``` + +### Using `altinity operator` + +If there is at least one alive replica in the shard, you can remove PVCs and STS for affected nodes and trigger reconciliation. The operator will try to copy the schema from other replicas. 
+ +### Check that schema migration was successful and node is replicating + +- To check that the schema migration has been **successful** query system.replicas: + +```sql +SELECT DISTINCT database,table,replica_is_active FROM system.replicas FORMAT Vertical +``` + +- Check how the replication process is performing using https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-replication-queue/ + + - If there are many postponed tasks with message: + + ```sql + Not executing fetch of part 7_22719661_22719661_0 because 16 fetches already executing, max 16. │ 2023-09-25 17:03:06 │ │ + ``` + + then it is ok, the maximum replication slots are being used. Exceptions are not OK and should be investigated + +- If migration was successful and replication is working then wait until the replication is finished. It may take some days depending on how much data is being replicated. After this edit the cluster configuration xml file for all replicas (`remote_servers.xml`) and add the new replica to the cluster. + + +### Possible problems + +#### **Exception** `REPLICA_ALREADY_EXISTS` + +```sql +Code: 253. DB::Exception: Received from localhost:9000. +DB::Exception: There was an error on [dl-ny2-vm-09.internal.io:9000]: +Code: 253. DB::Exception: Replica /clickhouse/tables/3c3503c3-ed3c-443b-9cb3-ef41b3aed0a8/1/replicas/dl-ny2-vm-09.internal.io +already exists. (REPLICA_ALREADY_EXISTS) (version 23.5.3.24 (official build)). (REPLICA_ALREADY_EXISTS) +(query: CREATE TABLE IF NOT EXISTS xxxx.yyyy UUID '3c3503c3-ed3c-443b-9cb3-ef41b3aed0a8' +``` + +[The DDLs](/altinity-kb-setup-and-maintenance/altinity-kb-check-replication-ddl-queue/) have been executed and some tables have been created and after that dropped but some left overs are left in ZK: +- If databases can be dropped then use `DROP DATABASE xxxxx SYNC` +- If databases cannot be dropped use `SYSTEM DROP REPLICA ‘replica_name’ FROM db.table` + +#### **Exception** `TABLE_ALREADY_EXISTS` + +```sql +Code: 57. 
DB::Exception: Received from localhost:9000. +DB::Exception: There was an error on [dl-ny2-vm-09.internal.io:9000]: +Code: 57. DB::Exception: Directory for table data store/3c3/3c3503c3-ed3c-443b-9cb3-ef41b3aed0a8/ already exists. +(TABLE_ALREADY_EXISTS) (version 23.5.3.24 (official build)). (TABLE_ALREADY_EXISTS) +(query: CREATE TABLE IF NOT EXISTS xxxx.yyyy UUID '3c3503c3-ed3c-443b-9cb3-ef41b3aed0a8' ON CLUSTER '{cluster}' +``` + +Tables have not been dropped correctly: + - If databases can be dropped then use `DROP DATABASE xxxxx SYNC` + - If databases cannot be dropped use: + +```sql +SELECT concat('DROP TABLE ', database, '.', name, ' SYNC;') +FROM system.tables +WHERE database NOT IN ('system', 'information_schema', 'INFORMATION_SCHEMA') +INTO OUTFILE '/tmp/drop_tables.sql' +FORMAT TSVRaw; +``` + +### Tuning + +- Sometimes replication goes very fast and if you have a tiered storage hot/cold you could run out of space, so for that it is interesting to: + - reduce fetches from 8 to 4 + - increase moves from 8 to 16 + +```xml + + + + 625000000 + 4 + 16 + + + +``` + +- Also to monitor this with: + +```sql +SELECT * +FROM system.metrics +WHERE metric LIKE '%Move%' + +Query id: 5050155b-af4a-474f-a07a-f2f7e95fb395 + +┌─metric─────────────────┬─value─┬─description──────────────────────────────────────────────────┐ +│ BackgroundMovePoolTask │ 0 │ Number of active tasks in BackgroundProcessingPool for moves │ +└────────────────────────┴───────┴──────────────────────────────────────────────────────────────┘ + +1 row in set. Elapsed: 0.164 sec. 
+ +dnieto-test :) SELECT * FROM system.metrics WHERE metric LIKE '%Fetch%'; + +SELECT * +FROM system.metrics +WHERE metric LIKE '%Fetch%' + +Query id: 992cae2a-fb58-4150-a088-83273805d0c4 + +┌─metric────────────────────┬─value─┬─description───────────────────────────────────────────────┐ +│ ReplicatedFetch │ 0 │ Number of data parts being fetched from replica │ +│ BackgroundFetchesPoolTask │ 0 │ Number of active fetches in an associated background pool │ +└───────────────────────────┴───────┴───────────────────────────────────────────────────────────┘ + +2 rows in set. Elapsed: 0.163 sec. +``` + +- There are new tables in v23 `system.replicated_fetches` and `system.moves` check it out for more info. +- if needed just stop replication using `SYSTEM STOP FETCHES` from the replicating nodes + + +## REMOVE nodes/Replicas from a Cluster + +- It is important to know which replica/node you want to remove to avoid problems. To check it you need to connect to a different replica/node that the one you want to remove. For instance we want to remove `arg_t04`, so we connected to replica `arg_t01`: + +```sql +SELECT DISTINCT arrayJoin(mapKeys(replica_is_active)) AS replica_name +FROM system.replicas + +┌─replica_name─┐ +│ arg_t01 │ +│ arg_t02 │ +│ arg_t03 │ +│ arg_t04 │ +└──────────────┘ +``` + +- After that (make sure you're connected to a replica different from the one that you want to remove, `arg_tg01`) and execute: + +```sql +SYSTEM DROP REPLICA 'arg_t04' +``` + +- If by any chance you're connected to the same replica you want to remove then **`SYSTEM DROP REPLICA`** will not work. +- BTW `SYSTEM DROP REPLICA` does not drop any tables and does not remove any data or metadata from disk, it will only remove metadata from Zookeeper/Keeper + +```sql +-- What happens if executing system drop replica in the local replica to remove. +SYSTEM DROP REPLICA 'arg_t04' + +Elapsed: 0.017 sec. + +Received exception from server (version 23.8.6): +Code: 305. 
DB::Exception: Received from dnieto-zenbook.lan:9440. DB::Exception: We can't drop local replica, please use `DROP TABLE` if you want to clean the data and drop this replica. (TABLE_WAS_NOT_DROPPED) +``` + +- After DROP REPLICA, we need to check that the replica is gone from the list or replicas: + +```sql +SELECT DISTINCT arrayJoin(mapKeys(replica_is_active)) AS replica_name +FROM system.replicas + +┌─replica_name─┐ +│ arg_t01 │ +│ arg_t02 │ +│ arg_t03 │ +└──────────────┘ + +-- We should see there is no replica arg_t04 +``` + +- Delete the replica in the cluster configuration: `remote_servers.xml` and shutdown the node/replica removed. diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/_index.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/_index.md index 7544fbab80..b4a57d4b1a 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/_index.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/_index.md @@ -4,27 +4,23 @@ linkTitle: "clickhouse-copier" description: > clickhouse-copier --- -The description of the utility and its parameters, as well as examples of the config files that you need to create for the copier are in the doc [https://clickhouse.tech/docs/en/operations/utilities/clickhouse-copier/](https://clickhouse.tech/docs/en/operations/utilities/clickhouse-copier/) +The description of the utility and its parameters, as well as examples of the config files that you need to create for the copier are in the official repo for the [ClickHouse® copier utility](https://github.com/clickhouse/copier/) The steps to run a task: -1. 
Create a config file for clickhouse-copier (zookeeper.xml) - - [https://clickhouse.tech/docs/en/operations/utilities/clickhouse-copier/\#format-of-zookeeper-xml](https://clickhouse.tech/docs/en/operations/utilities/clickhouse-copier/#format-of-zookeeper-xml) - +1. Create a config file for `clickhouse-copier` (zookeeper.xml) 2. Create a config file for the task (task1.xml) +3. Create the task in ZooKeeper and start an instance of `clickhouse-copier` - [https://clickhouse.tech/docs/en/operations/utilities/clickhouse-copier/\#configuration-of-copying-tasks](https://clickhouse.tech/docs/en/operations/utilities/clickhouse-copier/#configuration-of-copying-tasks) - -3. Create the task in ZooKeeper and start an instance of clickhouse-copier`clickhouse-copier --daemon --base-dir=/opt/clickhouse-copier --config /opt/clickhouse-copier/zookeeper.xml --task-path /clickhouse/copier/task1 --task-file /opt/clickhouse-copier/task1.xml` + `clickhouse-copier --daemon --base-dir=/opt/clickhouse-copier --config=/opt/clickhouse-copier/zookeeper.xml --task-path=/clickhouse/copier/task1 --task-file=/opt/clickhouse-copier/task1.xml` -If the node in ZooKeeper already exists and you want to change it, you need to add the `task-upload-force` parameter: + If the node in ZooKeeper already exists and you want to change it, you need to add the `task-upload-force` parameter: -`clickhouse-copier --daemon --base-dir=/opt/clickhouse-copier --config /opt/clickhouse-copier/zookeeper.xml --task-path /clickhouse/copier/task1 --task-file /opt/clickhouse-copier/task1.xml --task-upload-force 1` + `clickhouse-copier --daemon --base-dir=/opt/clickhouse-copier --config=/opt/clickhouse-copier/zookeeper.xml --task-path=/clickhouse/copier/task1 --task-file=/opt/clickhouse-copier/task1.xml --task-upload-force=1` -If you want to run another instance of clickhouse-copier for the same task, you need to copy the config file (zookeeper.xml) to another server, and run this command: + If you want to run another instance 
of `clickhouse-copier` for the same task, you need to copy the config file (zookeeper.xml) to another server, and run this command: -`clickhouse-copier --daemon --base-dir=/opt/clickhouse-copier --config /opt/clickhouse-copier/zookeeper.xml --task-path /clickhouse/copier/task1` + `clickhouse-copier --daemon --base-dir=/opt/clickhouse-copier --config=/opt/clickhouse-copier/zookeeper.xml --task-path=/clickhouse/copier/task1` The number of simultaneously running instances is controlled be the `max_workers` parameter in your task configuration file. If you run more workers superfluous workers will sleep and log messages like this: @@ -32,11 +28,10 @@ The number of simultaneously running instances is controlled be the `max_workers ### See also -* https://clickhouse.tech/docs/en/operations/utilities/clickhouse-copier/ +* https://github.com/clickhouse/copier/ * Никита Михайлов. Кластер ClickHouse ctrl-с ctrl-v. HighLoad++ Весна 2021 [slides]( https://raw.githubusercontent.com/ClickHouse/clickhouse-presentations/master/highload2021/copier.pdf) * 21.7 have a huge bulk of fixes / improvements. 
https://github.com/ClickHouse/ClickHouse/pull/23518 * https://altinity.com/blog/2018/8/22/clickhouse-copier-in-practice -* http://www.clickhouse.com.cn/topic/601fb322b06e5e0f21ba79e1 * https://github.com/getsentry/snuba/blob/master/docs/clickhouse-copier.md * https://hughsite.com/post/clickhouse-copier-usage.html * https://www.jianshu.com/p/c058edd664a6 diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-20.3-and-earlier.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-20.3-and-earlier.md index ece660b90a..d2d46103e4 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-20.3-and-earlier.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-20.3-and-earlier.md @@ -4,10 +4,10 @@ linkTitle: "clickhouse-copier 20.3 and earlier" description: > clickhouse-copier 20.3 and earlier --- -Clickhouse-copier was created to move data between clusters. +`clickhouse-copier` was created to move data between clusters. It runs simple INSERT…SELECT queries and can copy data between tables with different engine parameters and between clusters with different number of shards. In the task configuration file you need to describe the layout of the source and the target cluster, and list the tables that you need to copy. You can copy whole tables or specific partitions. -Clickhouse-copier uses temporary distributed tables to select from the source cluster and insert into the target cluster. +`clickhouse-copier` uses temporary distributed tables to select from the source cluster and insert into the target cluster. 
## The process is as follows @@ -27,17 +27,17 @@ If a worker was interrupted, another worker can be started to continue the task. ## Configuring the engine of the target table -Clickhouse-copier uses the engine from the task configuration file for these purposes: +`clickhouse-copier` uses the engine from the task configuration file for these purposes: * to create target tables if they don’t exist. * PARTITION BY: to SELECT a partition of data from the source table, to DROP existing partitions from target tables. -Clickhouse-copier does not support the old MergeTree format. -However, you can create the target tables manually and specify the engine in the task configuration file in the new format so that clickhouse-copier can parse it for its SELECT queries. +`clickhouse-copier` does not support the old MergeTree format. +However, you can create the target tables manually and specify the engine in the task configuration file in the new format so that `clickhouse-copier` can parse it for its SELECT queries. ## How to monitor the status of running tasks -Clickhouse-copier uses ZooKeeper to keep track of the progress and to communicate between workers. +`clickhouse-copier` uses ZooKeeper to keep track of the progress and to communicate between workers. Here is a list of queries that you can use to see what’s happening. 
```sql diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-20.4+.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-20.4_21.6.md similarity index 58% rename from content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-20.4+.md rename to content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-20.4_21.6.md index 42a7ca6fe7..c3bf83b6a5 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-20.4+.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-20.4_21.6.md @@ -3,23 +3,25 @@ title: "clickhouse-copier 20.4 - 21.6" linkTitle: "clickhouse-copier 20.4 - 21.6" description: > clickhouse-copier 20.4 - 21.6 +aliases: + /altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-20.4+/ --- -Clickhouse-copier was created to move data between clusters. -It runs simple INSERT…SELECT queries and can copy data between tables with different engine parameters and between clusters with different number of shards. +`clickhouse-copier` was created to move data between clusters. +It runs simple `INSERT…SELECT` queries and can copy data between tables with different engine parameters and between clusters with different number of shards. In the task configuration file you need to describe the layout of the source and the target cluster, and list the tables that you need to copy. You can copy whole tables or specific partitions. 
-Clickhouse-copier uses temporary distributed tables to select from the source cluster and insert into the target cluster. +`clickhouse-copier` uses temporary distributed tables to select from the source cluster and insert into the target cluster. -The behavior of clickhouse-copier was changed in 20.4: +The behavior of `clickhouse-copier` was changed in 20.4: -* Now clickhouse-copier inserts data into intermediate tables, and after the insert finishes successfully clickhouse-copier attaches the completed partition into the target table. This allows for incremental data copying, because the data in the target table is intact during the process. **Important note:** ATTACH PARTITION respects the `max_partition_size_to_drop` limit. Make sure the `max_partition_size_to_drop` limit is big enough (or set to zero) in the destination cluster. If clickhouse-copier is unable to attach a partition because of the limit, it will proceed to the next partition, and it will drop the intermediate table when the task is finished (if the intermediate table is less than the `max_table_size_to_drop` limit). **Another important note:** ATTACH PARTITION is replicated. The attached partition will need to be downloaded by the other replicas. This can create significant network traffic between ClickHouse nodes. If an attach takes a long time, clickhouse-copier will log a timeout and will proceed to the next step. -* Now clickhouse-copier splits the source data into chunks and copies them one by one. This is useful for big source tables, when inserting one partition of data can take hours. If there is an error during the insert clickhouse-copier has to drop the whole partition and start again. The `number_of_splits` parameter lets you split your data into chunks so that in case of an exception clickhouse-copier has to re-insert only one chunk of the data. -* Now clickhouse-copier runs `OPTIMIZE target_table PARTITION ... DEDUPLICATE` for non-Replicated MergeTree tables. 
**Important note:** This is a very strange feature that can do more harm than good. We recommend to disable it by configuring the engine of the target table as Replicated in the task configuration file, and create the target tables manually if they are not supposed to be replicated. Intermediate tables are always created as plain MergeTree. +* Now `clickhouse-copier` inserts data into intermediate tables, and after the insert finishes successfully `clickhouse-copier` attaches the completed partition into the target table. This allows for incremental data copying, because the data in the target table is intact during the process. **Important note:** ATTACH PARTITION respects the `max_partition_size_to_drop` limit. Make sure the `max_partition_size_to_drop` limit is big enough (or set to zero) in the destination cluster. If `clickhouse-copier` is unable to attach a partition because of the limit, it will proceed to the next partition, and it will drop the intermediate table when the task is finished (if the intermediate table is less than the `max_table_size_to_drop` limit). **Another important note:** ATTACH PARTITION is replicated. The attached partition will need to be downloaded by the other replicas. This can create significant network traffic between ClickHouse nodes. If an attach takes a long time, `clickhouse-copier` will log a timeout and will proceed to the next step. +* Now `clickhouse-copier` splits the source data into chunks and copies them one by one. This is useful for big source tables, when inserting one partition of data can take hours. If there is an error during the insert `clickhouse-copier` has to drop the whole partition and start again. The `number_of_splits` parameter lets you split your data into chunks so that in case of an exception `clickhouse-copier` has to re-insert only one chunk of the data. +* Now `clickhouse-copier` runs `OPTIMIZE target_table PARTITION ... DEDUPLICATE` for non-Replicated MergeTree tables. 
**Important note:** This is a very strange feature that can do more harm than good. We recommend to disable it by configuring the engine of the target table as Replicated in the task configuration file, and create the target tables manually if they are not supposed to be replicated. Intermediate tables are always created as plain MergeTree. ## The process is as follows 1. Process the configuration files. 2. Discover the list of partitions if not provided in the config. -3. Copy partitions one by one _**I’m not sure of the order since I was copying from 1 shard to 4 shards.**_ _**The metadata in ZooKeeper suggests the order described here.**_ +3. Copy partitions one by one ** The metadata in ZooKeeper suggests the order described here.** 1. Copy chunks of data one by one. 1. Copy data from source shards one by one. 1. Create intermediate tables on all shards of the target cluster. @@ -37,23 +39,23 @@ If a worker was interrupted, another worker can be started to continue the task. ## Configuring the engine of the target table -Clickhouse-copier uses the engine from the task configuration file for these purposes: +`clickhouse-copier` uses the engine from the task configuration file for these purposes: * to create target and intermediate tables if they don’t exist. * PARTITION BY: to SELECT a partition of data from the source table, to ATTACH partitions into target tables, to DROP incomplete partitions from intermediate tables, to OPTIMIZE partitions after they are attached to the target. * ORDER BY: to SELECT a chunk of data from the source table. -Here is an example of SELECT that clickhouse-copier runs to get the sixth of ten chunks of data: +Here is an example of SELECT that `clickhouse-copier` runs to get the sixth of ten chunks of data: ```sql WHERE ( = ( AS partition_key)) AND (cityHash64() % 10 = 6 ) ``` -Clickhouse-copier does not support the old MergeTree format. 
-However, you can create the intermediate tables manually with the same engine as the target tables (otherwise ATTACH will not work), and specify the engine in the task configuration file in the new format so that clickhouse-copier can parse it for SELECT, ATTACH PARTITION and DROP PARTITION queries. +`clickhouse-copier` does not support the old MergeTree format. +However, you can create the intermediate tables manually with the same engine as the target tables (otherwise ATTACH will not work), and specify the engine in the task configuration file in the new format so that `clickhouse-copier` can parse it for SELECT, ATTACH PARTITION and DROP PARTITION queries. -**Important note**: always configure engine as Replicated to disable OPTIMIZE … DEDUPLICATE (unless you know why you need clickhouse-copier to run OPTIMIZE … DEDUPLICATE). +**Important note**: always configure engine as Replicated to disable OPTIMIZE … DEDUPLICATE (unless you know why you need `clickhouse-copier` to run OPTIMIZE … DEDUPLICATE). ## How to configure the number of chunks @@ -70,11 +72,11 @@ You can change this parameter in the `table` section of the task configuration f ## How to monitor the status of running tasks -Clickhouse-copier uses ZooKeeper to keep track of the progress and to communicate between workers. +`clickhouse-copier` uses ZooKeeper to keep track of the progress and to communicate between workers. Here is a list of queries that you can use to see what’s happening. 
```sql ---task-path /clickhouse/copier/task1 +--task-path=/clickhouse/copier/task1 -- The task config select * from system.zookeeper @@ -82,11 +84,17 @@ where path='' name | ctime | mtime ----------------------------+---------------------+-------------------- description | 2021-03-22 13:15:48 | 2021-03-22 13:25:28 +status | 2021-03-22 13:15:48 | 2021-03-22 13:25:28 task_active_workers_version | 2021-03-22 13:15:48 | 2021-03-22 20:32:09 tables | 2021-03-22 13:16:47 | 2021-03-22 13:16:47 task_active_workers | 2021-03-22 13:15:48 | 2021-03-22 13:15:48 +-- Status +select * from system.zookeeper +where path='/status' + + -- Running workers select * from system.zookeeper where path='/task_active_workers' diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-kubernetes-job.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-kubernetes-job.md new file mode 100644 index 0000000000..849449ba7f --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/altinity-kb-clickhouse-copier-kubernetes-job.md @@ -0,0 +1,255 @@ +--- +title: "Kubernetes job for clickhouse-copier" +linkTitle: "Kubernetes job for clickhouse-copier" +description: > + Kubernetes job for `clickhouse-copier` +--- +# `clickhouse-copier` deployment in kubernetes + +`clickhouse-copier` can be deployed in a kubernetes environment to automate some simple backups or copy fresh data between clusters. + +Some documentation to read: +* https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/altinity-kb-clickhouse-copier/ +* https://github.com/clickhouse/copier/ + +## Deployment + +Use a kubernetes job is recommended but a simple pod can be used if you only want to execute the copy one time. + +Just edit/change all the ```yaml``` files to your needs. 
+ +### 1) Create the PVC: + +First create a namespace in which all the pods and resources are going to be deployed + +```bash +kubectl create namespace clickhouse-copier +``` + +Then create the PVC using a ```storageClass``` gp2-encrypted class or use any other storageClass from other providers: + +```yaml +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: copier-logs + namespace: clickhouse-copier +spec: + storageClassName: gp2-encrypted + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +``` + +and deploy: + +```bash +kubectl -n clickhouse-copier create -f ./kubernetes/copier-pvc.yaml +``` + +### 2) Create the configmap: + +The configmap has both files ```zookeeper.xml``` and ```task01.xml``` with the zookeeper node listing and the parameters for the task respectively. + +```yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: copier-config + namespace: clickhouse-copier +data: + task01.xml: | + + + true + + + trace + + + + + + clickhouse01.svc.cluster.local + 9000 + chcopier + pass + + + clickhouse02.svc.cluster.local + 9000 + chcopier + pass + + + + + + + + clickhouse03.svc.cluster.local + 9000 + chcopier + pass + + + + + clickhouse03.svc.cluster.local + 9000 + chcopier + pass + + + + + 1 + + 1 + + + 0 + + + 3 + 1 + + + + all-replicated + default + fact_sales_event + all-sharded + default + fact_sales_event + + Engine=ReplicatedMergeTree('/clickhouse/{cluster}/tables/{shard}/fact_sales_event', '{replica}') + PARTITION BY toYYYYMM(timestamp) + ORDER BY (channel_id, product_id) + SETTINGS index_granularity = 8192 + + rand() + + + + zookeeper.xml: | + + + trace + 100M + 3 + + + + zookeeper1.svc.cluster.local + 2181 + + + zookeeper2.svc.cluster.local + 2181 + + + zookeeper3.svc.cluster.local + 2181 + + + +``` + +and deploy: + +```bash +kubectl -n clickhouse-copier create -f ./kubernetes/copier-configmap.yaml +``` + +The ```task01.xml``` file has many parameters to take into account explained in the repo for 
[clickhouse-copier](https://github.com/clickhouse/copier/). Important to note that it is needed a FQDN for the Zookeeper nodes and ClickHouse® server that are valid for the cluster. As the deployment creates a new namespace, it is recommended to use a FQDN linked to a service. For example ```zookeeper01.svc.cluster.local```. This file should be adapted to both clusters topologies and to the needs of the user. + +The ```zookeeper.xml``` file is pretty straightforward with a simple 3 node ensemble configuration. + +### 3) Create the job: + +Basically the job will download the official ClickHouse image and will create a pod with 2 containers: + + - clickhouse-copier: This container will run the clickhouse-copier utility. + + - sidecar-logging: This container will be used to read the logs of the clickhouse-copier container for different runs (this part can be improved): + +```yaml +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: clickhouse-copier-test + namespace: clickhouse-copier +spec: + # only for kubernetes 1.23 + # ttlSecondsAfterFinished: 86400 + template: + spec: + containers: + - name: clickhouse-copier + image: clickhouse/clickhouse-server:21.8 + command: + - clickhouse-copier + - --task-upload-force=1 + - --config-file=$(CH_COPIER_CONFIG) + - --task-path=$(CH_COPIER_TASKPATH) + - --task-file=$(CH_COPIER_TASKFILE) + - --base-dir=$(CH_COPIER_BASEDIR) + env: + - name: CH_COPIER_CONFIG + value: "/var/lib/clickhouse/tmp/zookeeper.xml" + - name: CH_COPIER_TASKPATH + value: "/clickhouse/copier/tasks/task01" + - name: CH_COPIER_TASKFILE + value: "/var/lib/clickhouse/tmp/task01.xml" + - name: CH_COPIER_BASEDIR + value: "/var/lib/clickhouse/tmp" + resources: + limits: + cpu: "1" + memory: 2048Mi + volumeMounts: + - name: copier-config + mountPath: /var/lib/clickhouse/tmp/zookeeper.xml + subPath: zookeeper.xml + - name: copier-config + mountPath: /var/lib/clickhouse/tmp/task01.xml + subPath: task01.xml + - name: copier-logs + mountPath: /var/lib/clickhouse/tmp 
+ - name: sidecar-logger + image: busybox:1.35 + command: ['/bin/sh', '-c', 'tail', '-n', '1000', '-f', '/tmp/copier-logs/clickhouse-copier*/*.log'] + resources: + limits: + cpu: "1" + memory: 512Mi + volumeMounts: + - name: copier-logs + mountPath: /tmp/copier-logs + volumes: + - name: copier-config + configMap: + name: copier-config + items: + - key: zookeeper.xml + path: zookeeper.xml + - key: task01.xml + path: task01.xml + - name: copier-logs + persistentVolumeClaim: + claimName: copier-logs + restartPolicy: Never + backoffLimit: 3 +``` + +Deploy and watch progress checking the logs: + +```bash +kubectl -n clickhouse-copier logs sidecar-logging +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/distributed-table-cluster.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/distributed-table-cluster.md new file mode 100644 index 0000000000..214240a014 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/distributed-table-cluster.md @@ -0,0 +1,139 @@ +--- +title: "Distributed table to ClickHouse® Cluster" +linkTitle: "Distributed table to cluster" +description: > + Shifting INSERTs to a standby cluster +keywords: + - clickhouse distributed table + - clickhouse distributed +--- + +In order to shift INSERTS to a standby cluster (for example increase zone availability or [disaster recovery](https://docs.altinity.com/operationsguide/availability-and-recovery/recovery-architecture/)) some ClickHouse® features can be used. + +Basically we need to create a distributed table, a MV, rewrite the `remote_servers.xml` config file and tune some parameters. + +Distributed engine information and parameters: +https://clickhouse.com/docs/en/engines/table-engines/special/distributed/ + +## Steps + +### Create a Distributed table in the source cluster + +For example, we should have a `ReplicatedMergeTree` table in which all inserts are falling. 
This table is the first step in our pipeline: + +```sql +CREATE TABLE db.inserts_source ON CLUSTER 'source' +( + column1 String + column2 DateTime + ..... +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/inserts_source', '{replica}') +PARTITION BY toYYYYMM(column2) +ORDER BY (column1, column2) +``` + +This table lives in the source cluster and all INSERTS go there. In order to shift all INSERTS in the source cluster to destination cluster we can create a `Distributed` table that points to another `ReplicatedMergeTree` in the destination cluster: + +```sql +CREATE TABLE db.inserts_source_dist ON CLUSTER 'source' +( + column1 String + column2 DateTime + ..... +) +ENGINE = Distributed('destination', db, inserts_destination) +``` + +### Create a Materialized View to shift INSERTS to destination cluster: + +```sql +CREATE MATERIALIZED VIEW shift_inserts ON CLUSTER 'source' +TO db.inserts_source_dist AS +SELECT * FROM db.inserts_source +``` + +### Create a ReplicatedMergeTree table in the destination cluster: + +This is the table in the destination cluster that is pointed by the distributed table in the source cluster + +```sql +CREATE TABLE db.inserts_destination ON CLUSTER 'destination' +( + column1 String + column2 DateTime + ..... +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/inserts_destination', '{replica}') +PARTITION BY toYYYYMM(column2) +ORDER BY (column1, column2) +``` + +### Rewrite remote_servers.xml: + +All the hostnames/FQDN from each replica/node must be accessible from both clusters. Also the remote_servers.xml from the source cluster should read like this: + +```xml + + + + + + host03 + 9000 + + + host04 + 9000 + + + + + + + host01 + 9000 + + + host02 + 9000 + + + + + + + + load_balancer.xxxx.com + 9440 + 1 + user + pass + + + + + +``` + +### Configuration settings + +Depending on your use case you can set the the distributed INSERTs to sync or [async mode](/altinity-kb-queries-and-syntax/async-inserts/). 
This example is for async mode: +Put this config settings on the default profile. Check for more info about the possible modes: + +https://clickhouse.com/docs/en/operations/settings/settings#insert_distributed_sync + +```xml + + .... + + + + 1 + + 1 + + ..... + + +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/fetch_alter_table.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/fetch_alter_table.md index 2a86c623a1..3b428cab6e 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/fetch_alter_table.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/fetch_alter_table.md @@ -50,9 +50,11 @@ If needed, after moving the data and checking that everything is sound, you can ```sql -- Required for DROP REPLICA DETACH TABLE ; --- It will remove everything from /table_path_in_z + +-- This will remove everything from /table_path_in_z/replicas/replica_name -- but not the data. You could reattach the table again and --- restore the replica if needed +-- restore the replica if needed. 
Get the zookeeper_path and replica_name from system.replicas + SYSTEM DROP REPLICA 'replica_name' FROM ZKPATH '/table_path_in_zk/'; ``` diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/mssql-clickhouse.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/mssql-clickhouse.md new file mode 100644 index 0000000000..4e4d37cf74 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/mssql-clickhouse.md @@ -0,0 +1,101 @@ +--- +title: "MSSQL bcp pipe to clickhouse-client" +linkTitle: "Export from MSSQL to ClickHouse®" +weight: 100 +description: >- + Export from MSSQL to ClickHouse® +--- + +## How to pipe data to ClickHouse® from bcp export tool for MSSQL database + +### Prepare tables + +```bash +LAPTOP.localdomain :) CREATE TABLE tbl(key UInt32) ENGINE=MergeTree ORDER BY key; + +root@LAPTOP:/home/user# sqlcmd -U sa -P Password78 +1> WITH t0(i) AS (SELECT 0 UNION ALL SELECT 0), t1(i) AS (SELECT 0 FROM t0 a, t0 b), t2(i) AS (SELECT 0 FROM t1 a, t1 b), t3(i) AS (SELECT 0 FROM t2 a, t2 b), t4(i) AS (SELECT 0 FROM t3 a, t3 b), t5(i) AS (SELECT 0 FROM t4 a, t3 b),n(i) AS (SELECT ROW_NUMBER() OVER(ORDER BY (SELECT 0)) FROM t5) SELECT i INTO tbl FROM n WHERE i BETWEEN 1 AND 16777216 +2> GO + +(16777216 rows affected) + +root@LAPTOP:/home/user# sqlcmd -U sa -P Password78 -Q "SELECT count(*) FROM tbl" + +----------- + 16777216 + +(1 rows affected) +``` + +### Piping + +```bash +root@LAPTOP:/home/user# mkfifo import_pipe +root@LAPTOP:/home/user# bcp "SELECT * FROM tbl" queryout import_pipe -t, -c -b 200000 -U sa -P Password78 -S localhost & +[1] 6038 +root@LAPTOP:/home/user# +Starting copy... +1000 rows successfully bulk-copied to host-file. Total received: 1000 +1000 rows successfully bulk-copied to host-file. Total received: 2000 +1000 rows successfully bulk-copied to host-file. Total received: 3000 +1000 rows successfully bulk-copied to host-file. 
Total received: 4000 +1000 rows successfully bulk-copied to host-file. Total received: 5000 +1000 rows successfully bulk-copied to host-file. Total received: 6000 +1000 rows successfully bulk-copied to host-file. Total received: 7000 +1000 rows successfully bulk-copied to host-file. Total received: 8000 +1000 rows successfully bulk-copied to host-file. Total received: 9000 +1000 rows successfully bulk-copied to host-file. Total received: 10000 +1000 rows successfully bulk-copied to host-file. Total received: 11000 +1000 rows successfully bulk-copied to host-file. Total received: 12000 +1000 rows successfully bulk-copied to host-file. Total received: 13000 +1000 rows successfully bulk-copied to host-file. Total received: 14000 +1000 rows successfully bulk-copied to host-file. Total received: 15000 +1000 rows successfully bulk-copied to host-file. Total received: 16000 +1000 rows successfully bulk-copied to host-file. Total received: 17000 +1000 rows successfully bulk-copied to host-file. Total received: 18000 +1000 rows successfully bulk-copied to host-file. Total received: 19000 +1000 rows successfully bulk-copied to host-file. Total received: 20000 +1000 rows successfully bulk-copied to host-file. Total received: 21000 +1000 rows successfully bulk-copied to host-file. Total received: 22000 +1000 rows successfully bulk-copied to host-file. Total received: 23000 +-- Enter +root@LAPTOP:/home/user# cat import_pipe | clickhouse-client --query "INSERT INTO tbl FORMAT CSV" & +... +1000 rows successfully bulk-copied to host-file. Total received: 16769000 +1000 rows successfully bulk-copied to host-file. Total received: 16770000 +1000 rows successfully bulk-copied to host-file. Total received: 16771000 +1000 rows successfully bulk-copied to host-file. Total received: 16772000 +1000 rows successfully bulk-copied to host-file. Total received: 16773000 +1000 rows successfully bulk-copied to host-file. Total received: 16774000 +1000 rows successfully bulk-copied to host-file. 
Total received: 16775000 +1000 rows successfully bulk-copied to host-file. Total received: 16776000 +1000 rows successfully bulk-copied to host-file. Total received: 16777000 +16777216 rows copied. +Network packet size (bytes): 4096 +Clock Time (ms.) Total : 11540 Average : (1453831.5 rows per sec.) + +[1]- Done bcp "SELECT * FROM tbl" queryout import_pipe -t, -c -b 200000 -U sa -P Password78 -S localhost +[2]+ Done cat import_pipe | clickhouse-client --query "INSERT INTO tbl FORMAT CSV" +``` + +### Another shell + +```bash +root@LAPTOP:/home/user# for i in `seq 1 600`; do clickhouse-client -q "select count() from tbl";sleep 1; done +0 +0 +0 +0 +0 +0 +1048545 +4194180 +6291270 +9436905 +11533995 +13631085 +16777216 +16777216 +16777216 +16777216 +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/remote-table-function.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/remote-table-function.md index f131a47ee6..3c4b39c300 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/remote-table-function.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/remote-table-function.md @@ -33,7 +33,7 @@ While of course it should be checked, every case is different. Yes, by the cost of extra memory usage (on the receiver side). -Clickhouse tries to form blocks of data in memory and while one of limit: `min_insert_block_size_rows` or `min_insert_block_size_bytes` being hit, clickhouse dump this block on disk. If clickhouse tries to execute insert in parallel (`max_insert_threads > 1`), it would form multiple blocks at one time. +ClickHouse® tries to form blocks of data in memory and while one of limit: `min_insert_block_size_rows` or `min_insert_block_size_bytes` being hit, ClickHouse dump this block on disk. If ClickHouse tries to execute insert in parallel (`max_insert_threads > 1`), it would form multiple blocks at one time. 
 So maximum memory usage can be calculated like this: `max_insert_threads * first(min_insert_block_size_rows OR min_insert_block_size_bytes)`
 
 Default values:
@@ -72,3 +72,44 @@ Default values:
 │ connect_timeout_with_failover_secure_ms │ 100 │
 └─────────────────────────────────────────┴───────┘
 ```
+
+### Example
+
+```
+#!/bin/bash
+
+table='...'
+database='bvt'
+local='...'
+remote='...'
+CH="clickhouse-client" # you may add auth here
+settings=" max_insert_threads=20,
+           max_threads=20,
+           min_insert_block_size_bytes = 536870912,
+           min_insert_block_size_rows = 16777216,
+           max_insert_block_size = 16777216,
+           optimize_on_insert=0";
+
+# need it to create temp table with same structure (suitable for attach)
+params=$($CH -h $remote -q "select partition_key,sorting_key,primary_key from system.tables where table='$table' and database = '$database' " -f TSV)
+IFS=$'\t' read -r partition_key sorting_key primary_key <<< $params
+
+# get list of source partitions and feed them to the copy loop
+$CH -h $local -q "select distinct partition from system.parts where table='$table' and database = '$database' " |
+
+while read -r partition; do
+# check that the partition is already copied
+  if [ `$CH -h $remote -q " select count() from system.parts where table='$table' and database = '$database' and partition='$partition'"` -eq 0 ] ; then
+    $CH -n -h $remote -q "
+      create temporary table temp as $database.$table engine=MergeTree -- 23.3 required for temporary table
+      partition by ($partition_key) primary key ($primary_key) order by ($sorting_key);
+      -- SYSTEM STOP MERGES temp; -- maybe....
+      set $settings;
+      insert into temp select * from remote($local,$database.$table) where _partition='$partition'
+      -- order by ($sorting_key) -- maybe....
+ ; + alter table $database.$table attach partition $partition from temp + " + fi +done +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/rsync.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/rsync.md index bd4060c53d..b3a496b3fe 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/rsync.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/rsync.md @@ -1,19 +1,66 @@ --- -title: "rsync" +title: "Moving ClickHouse to Another Server" linkTitle: "rsync" description: > - rsync + Copying Multi-Terabyte Live ClickHouse to Another Server --- -### Short Instruction -1. Do [FREEZE TABLE](https://clickhouse.tech/docs/en/sql-reference/statements/alter/partition/#alter_freeze-partition) on needed table, partition. It would produce consistent snapshot of table data. -2. Run rsync command. +When migrating a large, live ClickHouse cluster (multi-terabyte scale) to a new server or cluster, the goal is to minimize downtime while ensuring data consistency. A practical method is to use **incremental `rsync`** in multiple passes, combined with ClickHouse’s replication features. - ```bash - rsync -ravlW --bwlimit=100000 /var/lib/clickhouse/data/shadow/N/database/table - root@remote_host:/var/lib/clickhouse/data/database/table/detached - ``` +1. **Prepare the new cluster** + - Ensure the new cluster is set up with its own ZooKeeper (or Keeper). + - Configure ClickHouse but keep it stopped initially. +2. **Initial data sync** + + Run a full recursive sync of the data directory from the old server to the new one: + + ```bash + rsync -ravlW --delete /var/lib/clickhouse/ user@new_host:/var/lib/clickhouse/ + ``` + + Explanation of flags: + + - `r`: recursive, includes all subdirectories. + - `a`: archive mode (preserves symlinks, permissions, timestamps, ownership, devices). + - `v`: verbose, shows progress. + - `l`: copy symlinks as symlinks. 
+ - `W`: copy whole files instead of using rsync’s delta algorithm (faster for large DB files). + - --delete: remove files from the destination that don’t exist on the source. - `--bwlimit` is transfer limit in KBytes per second. + If you plan to run several replicas on a new cluster, rsync data to all of them. To save the performance of production servers, you can copy data to 1 new replica and then use it as a source for others. However, you can start from a single replica and add more after switching. -3. Run [ATTACH PARTITION](https://clickhouse.tech/docs/en/sql-reference/statements/alter/partition/#alter_attach-partition) for each partition from `./detached` directory. + Add --bwlimit=100000 to preserve the performance of the production cluster while copying a lot of data. + + Consider shards as independent clusters. + +4. **Incremental re-syncs** + - Repeat the `rsync` step multiple times while the old cluster is live. + - Each subsequent run will copy only changes and reduce the final sync time. +5. **Restore replication metadata** + - Start the new ClickHouse node(s). + - Run `SYSTEM RESTORE REPLICA` to rebuild replication metadata in ZooKeeper. +6. **Test the application** + - Point your test environment to the new cluster. + - Validate queries, schema consistency, and application behavior. +7. **Final sync and switchover** + - Stop ClickHouse on the old cluster. + - Immediately run a final incremental `rsync` to catch last-minute changes. + - Reinitialize ZooKeeper/Keeper database (stop/clear snapshots/start). + - Run `SYSTEM RESTORE REPLICA` to rebuild replication metadata in ZooKeeper again. + - Start ClickHouse on the new cluster and switch production traffic. + - add more replicas as needed + + +NOTES: + +1. If you are using a mount point that differs from /var/lib/clickhouse/data, adjust the rsync command accordingly to point to the correct location. 
For example, suppose you reconfigure the storage path as follows in /etc/clickhouse-server/config.d/config.xml. +``` + + + /data1/clickhouse/ + ... + +``` +You'll need to use `/data1/clickhouse` instead of `/var/lib/clickhouse` in the rsync paths. + +2. ClickHouse docker container image does not have rsync installed. Add it using apt-get or run sidecar in k8s. diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-ddlworker/_index.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-ddlworker/_index.md index 052701a190..0722b0021c 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-ddlworker/_index.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-ddlworker/_index.md @@ -1,20 +1,25 @@ --- -title: "DDLWorker" -linkTitle: "DDLWorker" -description: > - DDLWorker +title: "DDLWorker and DDL queue problems" +linkTitle: "DDLWorker and DDL queue problems" +description: > + Finding and troubleshooting problems in the `distributed_ddl_queue` +keywords: + - clickhouse ddl + - clickhouse replication queue --- -DDLWorker is a subprocess (thread) of clickhouse-server that executes `ON CLUSTER` tasks at the node. +DDLWorker is a subprocess (thread) of `clickhouse-server` that executes `ON CLUSTER` tasks at the node. -When you execute a DDL query with `ON CLUSTER mycluster` section the query executor at the current node reads the cluster `mycluster` definition (remote_servers / system.clusters) and places tasks into Zookeeper znode `task_queue/ddl/...` for members of the cluster `mycluster`. +When you execute a DDL query with `ON CLUSTER mycluster` section, the query executor at the current node reads the cluster `mycluster` definition (remote_servers / system.clusters) and places tasks into Zookeeper znode `task_queue/ddl/...` for members of the cluster `mycluster`. 
-DDLWorker at all ClickHouse nodes constantly check this `task_queue` for their tasks and executes them locally and reports about a result back into `task_queue`. +DDLWorker at all ClickHouse® nodes constantly check this `task_queue` for their tasks, executes them locally, and reports about the results back into `task_queue`. The common issue is the different hostnames/IPAddresses in the cluster definition and locally. -So a node initiator puts tasks for a host named Host1. But the Host1 thinks about own name as localhost or **xdgt634678d** (internal docker hostname) and never sees tasks for the Host1 because is looking tasks for **xdgt634678d.** The same with internal VS external IP addresses. +So if the initiator node puts tasks for a host named Host1. But the Host1 thinks about own name as localhost or **xdgt634678d** (internal docker hostname) and never sees tasks for the Host1 because is looking tasks for **xdgt634678d.** The same with internal VS external IP addresses. -Another issue that sometimes DDLWorker thread can crash then ClickHouse node stops to execute `ON CLUSTER` tasks. +## DDLWorker thread crashed + +That causes ClickHouse to stop executing `ON CLUSTER` tasks. Check that DDLWorker is alive: @@ -36,6 +41,7 @@ config.xml /clickhouse/task_queue/ddl + 1 1000 604800 60 @@ -50,3 +56,25 @@ Default values: **task_max_lifetime** = 7 \* 24 \* 60 \* 60 (in seconds = week) – Delete task if its age is greater than that. **max_tasks_in_queue** = 1000 – How many tasks could be in the queue. + +**pool_size** = 1 - How many ON CLUSTER queries can be run simultaneously. 
+
+## Too intensive stream of ON CLUSTER command
+
+Generally, it's a bad design, but you can increase the `pool_size` setting.
+
+## Stuck DDL tasks in the distributed_ddl_queue
+
+Sometimes [DDL tasks](/altinity-kb-setup-and-maintenance/altinity-kb-ddlworker/) (the ones that use ON CLUSTER) can get stuck in the `distributed_ddl_queue` because the replicas can overload if multiple DDLs (thousands of CREATE/DROP/ALTER) are executed at the same time. This is very normal in heavy ETL jobs. This can be detected by checking the `distributed_ddl_queue` table and seeing if there are tasks that are not moving or are stuck for a long time.
+
+If these DDLs are completed in some replicas but failed in others, the simplest way to solve this is to execute the failed command in the missed replicas without ON CLUSTER. If most of the DDLs failed, then check the number of unfinished records in `distributed_ddl_queue` on the other nodes, because most probably it will be as high as thousands.
+
+First, backup the `distributed_ddl_queue` into a table so you will have a snapshot of the table with the states of the tasks. You can do this with the following command:
+
+```sql
+CREATE TABLE default.system_distributed_ddl_queue AS SELECT * FROM system.distributed_ddl_queue;
+```
+
+After this, we need to check from the backup table which tasks are not finished and execute them manually in the missed replicas, and review the pipelines which run `ON CLUSTER` commands so they do not abuse them. There is a new `CREATE TEMPORARY TABLE` command that can be used to avoid the `ON CLUSTER` command in some cases, where you need an intermediate table to do some operations and after that you can `INSERT INTO` the final table or do `ALTER TABLE final ATTACH PARTITION FROM TABLE temp` and this temp table will be dropped automatically after the session is closed.
+ + diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-ddlworker/there-are-n-unfinished-hosts-0-of-them-are-currently-active.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-ddlworker/there-are-n-unfinished-hosts-0-of-them-are-currently-active.md index 6850e2955b..ca02a38cf7 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-ddlworker/there-are-n-unfinished-hosts-0-of-them-are-currently-active.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-ddlworker/there-are-n-unfinished-hosts-0-of-them-are-currently-active.md @@ -2,13 +2,13 @@ title: "There are N unfinished hosts (0 of them are currently active)." linkTitle: "There are N unfinished hosts (0 of them are currently active)." description: > - "There are N unfinished hosts (0 of them are currently active)." + There are N unfinished hosts (0 of them are currently active). --- Sometimes your Distributed DDL queries are being stuck, and not executing on all or subset of nodes, there are a lot of possible reasons for that kind of behavior, so it would take some time and effort to investigate. ## Possible reasons -### Clickhouse node can't recognize itself +### ClickHouse® node can't recognize itself ```sql SELECT * FROM system.clusters; -- check is_local column, it should have 1 for itself @@ -24,7 +24,7 @@ cat /etc/hostname ### Debian / Ubuntu -There is an issue in Debian based images, when hostname being mapped to 127.0.1.1 address which doesn't literally match network interface and clickhouse fails to detect this address as local. +There is an issue in Debian based images, when hostname being mapped to 127.0.1.1 address which doesn't literally match network interface and ClickHouse fails to detect this address as local. 
[https://github.com/ClickHouse/ClickHouse/issues/23504](https://github.com/ClickHouse/ClickHouse/issues/23504) @@ -99,7 +99,10 @@ WHERE metric LIKE '%MaxDDLEntryID%' grep -C 40 "ddl_entry" /var/log/clickhouse-server/clickhouse-server*.log ``` -#### Issues that can prevent the task execution + +### Issues that can prevent task execution + +#### Obsolete Replicas Obsolete replicas left in zookeeper. @@ -116,6 +119,8 @@ SYSTEM START REPLICATION QUEUES; [https://clickhouse.tech/docs/en/sql-reference/statements/system/\#query_language-system-drop-replica](https://clickhouse.tech/docs/en/sql-reference/statements/system/\#query_language-system-drop-replica) +#### Tasks manually removed from DDL queue + Task were removed from DDL queue, but left in Replicated\*MergeTree table queue. ```bash @@ -148,3 +153,29 @@ Context of this problem is: Solution: * Reload/Restore this replica from scratch. + +#### DDL path was changed in Zookeeper without restarting ClickHouse + +Changing the DDL queue path in Zookeeper without restarting ClickHouse will make ClickHouse confused. If you need to do this ensure that you restart ClickHouse before submitting additional distributed DDL commands. Here's an example. + +```sql +-- Path before change: +SELECT * +FROM system.zookeeper +WHERE path = '/clickhouse/clickhouse101/task_queue' + +┌─name─┬─value─┬─path─────────────────────────────────┐ +│ ddl │ │ /clickhouse/clickhouse101/task_queue │ +└──────┴───────┴──────────────────────────────────────┘ + +-- Path after change +SELECT * +FROM system.zookeeper +WHERE path = '/clickhouse/clickhouse101/task_queue' + +┌─name─┬─value─┬─path─────────────────────────────────┐ +│ ddl2 │ │ /clickhouse/clickhouse101/task_queue │ +└──────┴───────┴──────────────────────────────────────┘ +``` + +The reason is that ClickHouse will not "see" this change and will continue to look for tasks in the old path. Altering paths in Zookeeper should be avoided if at all possible. If necessary it must be done *very carefully*. 
diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-memory-configuration-settings.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-memory-configuration-settings.md index 01cce387aa..ab493f097a 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-memory-configuration-settings.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-memory-configuration-settings.md @@ -22,9 +22,18 @@ By default it 90% of the physical RAM of the server. You can decrease that in some scenarios (like you need to leave more free RAM for page cache or to some other software). +### Limits? + +```sql +select metric, formatReadableSize(value) from system.asynchronous_metrics where metric ilike '%MemoryTotal%' +union all +select name, formatReadableSize(toUInt64(value)) from system.server_settings where name='max_server_memory_usage' +FORMAT PrettyCompactMonoBlock +``` + ### How to check what is using my RAM? -[altinity-kb-who-ate-my-memory.md" ]({{}}) +[altinity-kb-who-ate-my-memory.md]({{}}) ### Mark cache diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-memory-overcommit.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-memory-overcommit.md new file mode 100644 index 0000000000..a6a718718d --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-memory-overcommit.md @@ -0,0 +1,49 @@ +--- +title: "Memory Overcommiter" +linkTitle: "Memory Overcommiter" +description: > + Enable Memory overcommiter instead of ussing `max_memory_usage` per query +--- + +## Memory Overcommiter + +From version 22.2+ [ClickHouse® was updated with enhanced Memory overcommit capabilities](https://github.com/ClickHouse/ClickHouse/pull/31182). In the past, queries were constrained by the `max_memory_usage` setting, imposing a rigid limitation. Users had the option to increase this limit, but it came at the potential expense of impacting other users during a single query. 
With the introduction of Memory overcommit, more memory-intensive queries can now execute, granted there are ample resources available. When the [server reaches its maximum memory limit](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#max_server_memory_usage), ClickHouse identifies the most overcommitted queries and attempts to terminate them. It's important to note that the terminated query might not be the one causing the condition. If it's not, the query will undergo a waiting period to allow the termination of the high-memory query before resuming its execution. This setup ensures that low-memory queries always have the opportunity to run, while more resource-intensive queries can execute during server idle times when resources are abundant. Users have the flexibility to fine-tune this behavior at both the server and user levels. + +If the memory overcommitter is not being used you'll get something like this: + +```bash +Received exception from server (version 22.8.20): +Code: 241. DB::Exception: Received from altinity.cloud:9440. DB::Exception: Received from chi-replica1-2-0:9000. DB::Exception: Memory limit (for query) exceeded: would use 5.00 GiB (attempt to allocate chunk of 4196736 bytes), maximum: 5.00 GiB. OvercommitTracker decision: Memory overcommit isn't used. OvercommitTracker isn't set.: (avg_value_size_hint = 0, avg_chars_size = 1, limit = 8192): while receiving packet from chi-replica1-1-0:9000: While executing Remote. (MEMORY_LIMIT_EXCEEDED) +``` + +So to enable Memory Overcommit you need to get rid of the `max_memory_usage` and `max_memory_usage_for_user` (set them to 0) and configure overcommit specific settings (**usually defaults are ok, so read carefully the documentation**) + +- `memory_overcommit_ratio_denominator`: It represents soft memory limit on the user level. This value is used to compute query overcommit ratio. 
+- `memory_overcommit_ratio_denominator_for_user`: It represents soft memory limit on the global level. This value is used to compute query overcommit ratio. +- `memory_usage_overcommit_max_wait_microseconds`: Maximum time thread will wait for memory to be freed in the case of memory overcommit. If timeout is reached and memory is not freed, exception is thrown + +Please check https://clickhouse.com/docs/en/operations/settings/memory-overcommit + +Also you will check/need to configure global memory server setting. These are by default: + +```xml + + + 0 + 0.8 + +``` + +With these set, now if you execute some queries with bigger memory needs than your `max_server_memory_usage` you'll get something like this: + +```bash +Received exception from server (version 22.8.20): +Code: 241. DB::Exception: Received from altinity.cloud:9440. DB::Exception: Received from chi-test1-2-0:9000. DB::Exception: Memory limit (total) exceeded: would use 12.60 GiB (attempt to allocate chunk of 4280448 bytes), maximum: 12.60 GiB. OvercommitTracker decision: Query was selected to stop by OvercommitTracker.: while receiving packet from chi-replica1-2-0:9000: While executing Remote. (MEMORY_LIMIT_EXCEEDED) +``` + +This will allow you to know that the Overcommit memory tracker is set and working. + +Also to note that maybe you don't need the Memory Overcommit system because with `max_memory_usage` per query you're ok. + +The good thing about memory overcommit is that you let ClickHouse handle the memory limitations instead of doing it manually, but there may be some scenarios where you don't want to use it and using `max_memory_usage` or `max_memory_usage_for_user` is a better fit. For example, if your workload has a lot of small/medium queries that are not memory intensive and you need to run few memory intensive queries for some users with a fixed memory limit. This is a common scenario for `dbt` or other ETL tools that usually run big memory intensive queries. 
+ diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-monitoring.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-monitoring.md index b38984b580..039af22ea6 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-monitoring.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-monitoring.md @@ -1,17 +1,16 @@ --- -title: "ClickHouse Monitoring" -linkTitle: "ClickHouse Monitoring" +title: "ClickHouse® Monitoring" +linkTitle: "ClickHouse® Monitoring" description: > - ClickHouse Monitoring + Tracking potential issues in your cluster before they cause a critical error +keywords: + - clickhouse monitoring + - clickhouse metrics --- -## ClickHouse Monitoring - -Monitoring helps to track potential issues in your cluster before they cause a critical error. - -What to read / watch on subject: -* Altinity webinar "ClickHouse Monitoring 101: What to monitor and how". [recording](https://www.youtube.com/watch?v=W9KlehhgwLw), [slides](https://www.slideshare.net/Altinity/clickhouse-monitoring-101-what-to-monitor-and-how) -* docs https://clickhouse.com/docs/en/operations/monitoring/ +What to read / watch on the subject: +* Altinity webinar "ClickHouse Monitoring 101: What to monitor and how". [Watch the video](https://www.youtube.com/watch?v=W9KlehhgwLw) or [download the slides](https://www.slideshare.net/Altinity/clickhouse-monitoring-101-what-to-monitor-and-how). 
+* [The ClickHouse docs](https://clickhouse.com/docs/en/operations/monitoring/) ## What should be monitored @@ -38,34 +37,39 @@ The following metrics should be collected / monitored * [See separate article](../altinity-kb-zookeeper/zookeeper-monitoring/) -## Monitoring tools +## ClickHouse monitoring tools ### Prometheus (embedded exporter) + Grafana * Enable [embedded exporter](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server_configuration_parameters-prometheus) * Grafana dashboards [https://grafana.com/grafana/dashboards/14192](https://grafana.com/grafana/dashboards/14192) or [https://grafana.com/grafana/dashboards/13500](https://grafana.com/grafana/dashboards/13500) -### clickhouse-operator embedded exporter +### Prometheus (embedded http handler with Altinity Kubernetes Operator for ClickHouse style metrics) + Grafana + +* Enable [http handler](../monitoring-operator-exporter-compatibility/) +* Useful, if you want to use the dashboard from the Altinity Kubernetes Operator for ClickHouse, but do not run ClickHouse in k8s. 
+ +### Prometheus (embedded exporter in the Altinity Kubernetes Operator for ClickHouse) + Grafana -* exporter is included in clickhouse-operator, and enabled automatically +* exporter is included in the Altinity Kubernetes Operator for ClickHouse, and enabled automatically * see instructions of [Prometheus](https://github.com/Altinity/clickhouse-operator/blob/eb3fc4e28514d0d6ea25a40698205b02949bcf9d/docs/prometheus_setup.md) and [Grafana](https://github.com/Altinity/clickhouse-operator/blob/eb3fc4e28514d0d6ea25a40698205b02949bcf9d/docs/grafana_setup.md) installation (if you don't have one) * Grafana dashboard [https://github.com/Altinity/clickhouse-operator/tree/master/grafana-dashboard](https://github.com/Altinity/clickhouse-operator/tree/master/grafana-dashboard) * Prometheus alerts [https://github.com/Altinity/clickhouse-operator/blob/master/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml](https://github.com/Altinity/clickhouse-operator/blob/master/deploy/prometheus/prometheus-alert-rules-clickhouse.yaml) -### Prometheus exporter (external) + Grafana +### Prometheus (ClickHouse external exporter) + Grafana * [clickhouse-exporter](https://github.com/ClickHouse/clickhouse_exporter) * Dashboard: https://grafana.com/grafana/dashboards/882 (unmaintained) -### Dashboards quering clickhouse directly via vertamedia / Altinity plugin +### Dashboards querying ClickHouse directly via vertamedia / Altinity plugin * Overview: [https://grafana.com/grafana/dashboards/13606](https://grafana.com/grafana/dashboards/13606) * Queries dashboard (analyzing system.query_log) https://grafana.com/grafana/dashboards/2515 -## Dashboard quering clickhouse directly via Grafana plugin +## Dashboard querying ClickHouse directly via Grafana plugin * https://grafana.com/blog/2022/05/05/introducing-the-official-clickhouse-plugin-for-grafana/ @@ -94,14 +98,14 @@ The following metrics should be collected / monitored * site24x7 https://www.site24x7.com/plugins/clickhouse-monitoring.html * 
Acceldata Pulse https://www.acceldata.io/blog/acceldata-pulse-for-clickhouse-monitoring -### "Build your own" monitoring +### "Build your own" ClickHouse monitoring -ClickHouse allow to access lot of internals using system tables. The main tables to access monitoring data are: +ClickHouse allows to access lots of internals using system tables. The main tables to access monitoring data are: * system.metrics * system.asynchronous_metrics * system.events -Minimum neccessary set of checks +Minimum necessary set of checks @@ -305,13 +309,13 @@ The following queries are recommended to be included in monitoring: * `SELECT * FROM system.mutations` * This is the source of information on the speed and progress of currently executed merges. -## Logs monitoring +## Monitoring ClickHouse logs -ClickHouse logs can be another important source of information. There are 2 logs enabled by default +[ClickHouse logs](/altinity-kb-setup-and-maintenance/logging/) can be another important source of information. There are 2 logs enabled by default * /var/log/clickhouse-server/clickhouse-server.err.log (error & warning, you may want to keep an eye on that or send it to some monitoring system) * /var/log/clickhouse-server/clickhouse-server.log (trace logs, very detailed, useful for debugging, usually too verbose to monitor). -You can additionally enable system.text_log table to have an access to the logs from clickhouse sql queries (ensure that you will not expose some information to the users which should not see it). +You can additionally enable system.text_log table to have an access to the logs from clickhouse sql queries (ensure that you will not expose some information to the users who should not see it). 
``` $ cat /etc/clickhouse-server/config.d/text_log.xml @@ -332,11 +336,6 @@ See https://clickhouse.com/docs/en/operations/opentelemetry/ * [https://tech.marksblogg.com/clickhouse-prometheus-grafana.html](https://tech.marksblogg.com/clickhouse-prometheus-grafana.html) * [Key Metrics for Monitoring ClickHouse](https://sematext.com/blog/clickhouse-monitoring-key-metrics/) -* [ClickHouse Monitoring Key Metrics to Monitor](https://dzone.com/articles/clickhouse-monitoring-key-metrics-to-monitor-semat) -* [ClickHouse Monitoring Tools: Five Tools to Consider](https://dzone.com/articles/clickhouse-monitoring-tools-five-tools-to-consider) -* [Monitoring ClickHouse](https://docs.instana.io/ecosystem/clickhouse/) * [Monitor ClickHouse with Datadog](https://www.datadoghq.com/blog/monitor-clickhouse/) * [Unsorted notes on monitor and Alerts](https://docs.google.com/spreadsheets/d/1K92yZr5slVQEvDglfZ88k_7bfsAKqahY9RPp_2tSdVU/edit#gid=521173956) * https://intl.cloud.tencent.com/document/product/1026/36887 -* https://chowdera.com/2021/03/20210301161806704Y.html -* https://chowdera.com/2021/03/20210301160252465m.html# diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-moving-table-to-another-device..md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-moving-table-to-another-device..md index ad29cdea59..9303dccce4 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-moving-table-to-another-device..md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-moving-table-to-another-device..md @@ -1,12 +1,12 @@ --- -title: "Moving table to another device." -linkTitle: "Moving table to another device." +title: "Moving a table to another device" +linkTitle: "Moving a table to another device" description: > - Moving table to another device. + Moving a table to another device. --- Suppose we mount a new device at path `/mnt/disk_1` and want to move `table_4` to it. -1. Create directory on new device for ClickHouse data. 
/in shell `mkdir /mnt/disk_1/clickhouse` +1. Create directory on new device for ClickHouse® data. /in shell `mkdir /mnt/disk_1/clickhouse` 2. Change ownership of created directory to ClickHouse user. /in shell `chown -R clickhouse:clickhouse /mnt/disk_1/clickhouse` 3. Create a special storage policy which should include both disks: old and new. /in shell diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-object-consistency-in-a-cluster.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-object-consistency-in-a-cluster.md index bebc188167..ca39c542df 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-object-consistency-in-a-cluster.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-object-consistency-in-a-cluster.md @@ -68,12 +68,14 @@ WITH ( SELECT database, dictionary, arrayFilter( i-> NOT has(groupArray(host),i), hosts) miss_dict, - arrayReduce('median', (groupArray((element_count, host)) AS ec).1 ) + arrayReduce('min', (groupArray((element_count, host)) AS ec).1) min, + arrayReduce('max', (groupArray((element_count, host)) AS ec).1) max FROM ( SELECT FQDN() host, database, name dictionary, element_count FROM clusterAllReplicas('{cluster}',system,dictionaries) ) GROUP BY database, dictionary -HAVING miss_dict <> [] +HAVING miss_dict <> [] or min <> max SETTINGS skip_unavailable_shards=1; +; ``` diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/aws-s3-recipes.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/aws-s3-recipes.md index c890aa49b0..4ad9709bcf 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/aws-s3-recipes.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/aws-s3-recipes.md @@ -31,7 +31,7 @@ Role shall contain a policy with permissions like: } ``` -Corresponding configuration of ClickHouse: +Corresponding configuration of ClickHouse®: ```xml @@ -64,3 
+64,48 @@ INSERT INTO table_s3 SELECT * FROM system.numbers LIMIT 100000000; SELECT * FROM table_s3; DROP TABLE table_s3; ``` + +## How to use AWS IRSA and IAM in the Altinity Kubernetes Operator for ClickHouse to allow S3 backup without Explicit credentials + +Install `clickhouse-operator` https://github.com/Altinity/clickhouse-operator/tree/master/docs/operator_installation_details.md + +Create Role and IAM Policy, look details in https://docs.aws.amazon.com/emr/latest/EMR-on-EKS-DevelopmentGuide/setting-up-enable-IAM.html + +Create service account with annotations +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: + namespace: + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam:::role/ +``` + +Link service account to podTemplate it will create `AWS_ROLE_ARN` and `AWS_WEB_IDENTITY_TOKEN_FILE` environment variables. +```yaml +apiVersion: "clickhouse.altinity.com/v1" +kind: "ClickHouseInstallation" +metadata: + name: + namespace: +spec: + defaults: + templates: + podTemplate: + templates: + podTemplates: + - name: + spec: + serviceAccountName: + containers: + - name: clickhouse-backup +``` + +For EC2 instances the same environment variables should be created: + +``` +AWS_ROLE_ARN=arn:aws:iam:::role/ +AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token +``` + diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/clean-up-orphaned-objects-on-s3.md.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/clean-up-orphaned-objects-on-s3.md.md new file mode 100644 index 0000000000..2c80a64f67 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/clean-up-orphaned-objects-on-s3.md.md @@ -0,0 +1,26 @@ +--- +title: "Clean up orphaned objects on s3" +linkTitle: "Clean up orphaned objects on s3" +weight: 100 +description: >- + Clean up orphaned objects left in an S3-backed ClickHouse tiered‐storage +--- + +### Problems + +- 
TRUNCATE and DROP TABLE remove **metadata only**. +- Long-running queries, merges or other replicas may still reference parts, so ClickHouse delays removal. +- There are bugs in ClickHouse that leave orphaned files, especially after failures. + +### Solutions + +- use our utility for garbage collection - https://github.com/Altinity/s3gc +- or create a separate path in the bucket for every table and every replica and remove the whole path in AWS console +- you can also use [clickhouse-disk](https://clickhouse.com/docs/operations/utilities/clickhouse-disks) utility to delete s3 data: + +``` +clickhouse-disks --disk s3 --query "remove /cluster/database/table/replica1" +``` + + + diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/s3_and_mutations.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/s3_and_mutations.md new file mode 100644 index 0000000000..7554895d49 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/s3_and_mutations.md @@ -0,0 +1,241 @@ +--- +title: "How much data are written to S3 during mutations" +linkTitle: "s3 and mutations" +weight: 100 +description: >- + Example of how much data ClickHouse® reads and writes to s3 during mutations. +--- + +## Configuration + +S3 disk with disabled merges + +```xml + + + + + s3 + https://s3.us-east-1.amazonaws.com/mybucket/test/test/ + 1 + + + + + + + + default + + + s3disk + true + + + + + + +``` + +Let's create a table and load some synthetic data. + +```sql +CREATE TABLE test_s3 +( + `A` Int64, + `S` String, + `D` Date +) +ENGINE = MergeTree +PARTITION BY D +ORDER BY A +SETTINGS storage_policy = 's3tiered'; + +insert into test_s3 select number, number, today() - intDiv(number, 10000000) from numbers(7e8); +0 rows in set. Elapsed: 98.091 sec. Processed 700.36 million rows, 5.60 GB (7.14 million rows/s., 57.12 MB/s.)
+ + +select disk_name, partition, sum(rows), formatReadableSize(sum(bytes_on_disk)) size, count() part_count +from system.parts where table= 'test_s3' and active +group by disk_name, partition +order by partition; + +┌─disk_name─┬─partition──┬─sum(rows)─┬─size──────┬─part_count─┐ +│ default │ 2023-05-06 │ 10000000 │ 78.23 MiB │ 5 │ +│ default │ 2023-05-07 │ 10000000 │ 78.31 MiB │ 6 │ +│ default │ 2023-05-08 │ 10000000 │ 78.16 MiB │ 5 │ +.... +│ default │ 2023-07-12 │ 10000000 │ 78.21 MiB │ 5 │ +│ default │ 2023-07-13 │ 10000000 │ 78.23 MiB │ 6 │ +│ default │ 2023-07-14 │ 10000000 │ 77.39 MiB │ 5 │ +└───────────┴────────────┴───────────┴───────────┴────────────┘ +70 rows in set. Elapsed: 0.023 sec. +``` + +## Performance of mutations for a local EBS (throughput: 500 MB/s) + +```sql +select * from test_s3 where A=490000000; +1 row in set. Elapsed: 0.020 sec. Processed 8.19 thousand rows, 92.67 KB (419.17 thousand rows/s., 4.74 MB/s.) + +select * from test_s3 where S='490000000'; +1 row in set. Elapsed: 14.117 sec. Processed 700.00 million rows, 12.49 GB (49.59 million rows/s., 884.68 MB/s.) + +delete from test_s3 where S = '490000000'; +0 rows in set. Elapsed: 22.192 sec. + +delete from test_s3 where A = '490000001'; +0 rows in set. Elapsed: 2.243 sec. + +alter table test_s3 delete where S = 590000000 settings mutations_sync=2; +0 rows in set. Elapsed: 21.387 sec. + +alter table test_s3 delete where A = '590000001' settings mutations_sync=2; +0 rows in set. Elapsed: 3.372 sec. + +alter table test_s3 update S='' where S = '690000000' settings mutations_sync=2; +0 rows in set. Elapsed: 20.265 sec. + +alter table test_s3 update S='' where A = '690000001' settings mutations_sync=2; +0 rows in set. Elapsed: 1.979 sec. 
+``` + +## Let's move data to S3 + +```sql +alter table test_s3 modify TTL D + interval 10 day to disk 's3disk'; + +-- 10 minutes later +┌─disk_name─┬─partition──┬─sum(rows)─┬─size──────┬─part_count─┐ +│ s3disk │ 2023-05-06 │ 10000000 │ 78.23 MiB │ 5 │ +│ s3disk │ 2023-05-07 │ 10000000 │ 78.31 MiB │ 6 │ +│ s3disk │ 2023-05-08 │ 10000000 │ 78.16 MiB │ 5 │ +│ s3disk │ 2023-05-09 │ 10000000 │ 78.21 MiB │ 6 │ +│ s3disk │ 2023-05-10 │ 10000000 │ 78.21 MiB │ 6 │ +... +│ s3disk │ 2023-07-02 │ 10000000 │ 78.22 MiB │ 5 │ +... +│ default │ 2023-07-11 │ 10000000 │ 78.20 MiB │ 6 │ +│ default │ 2023-07-12 │ 10000000 │ 78.21 MiB │ 5 │ +│ default │ 2023-07-13 │ 10000000 │ 78.23 MiB │ 6 │ +│ default │ 2023-07-14 │ 10000000 │ 77.40 MiB │ 5 │ +└───────────┴────────────┴───────────┴───────────┴────────────┘ +70 rows in set. Elapsed: 0.007 sec. +``` + +### Sizes of a table on S3 and a size of each column +``` +select sum(rows), formatReadableSize(sum(bytes_on_disk)) size +from system.parts where table= 'test_s3' and active and disk_name = 's3disk'; +┌─sum(rows)─┬─size─────┐ +│ 600000000 │ 4.58 GiB │ +└───────────┴──────────┘ + +SELECT + database, + table, + column, + formatReadableSize(sum(column_data_compressed_bytes) AS size) AS compressed +FROM system.parts_columns +WHERE (active = 1) AND (database LIKE '%') AND (table LIKE 'test_s3') AND (disk_name = 's3disk') +GROUP BY + database, + table, + column +ORDER BY column ASC + +┌─database─┬─table───┬─column─┬─compressed─┐ +│ default │ test_s3 │ A │ 2.22 GiB │ +│ default │ test_s3 │ D │ 5.09 MiB │ +│ default │ test_s3 │ S │ 2.33 GiB │ +└──────────┴─────────┴────────┴────────────┘ +``` + +## S3 Statistics of selects + +```sql +select *, _part from test_s3 where A=100000000; +┌─────────A─┬─S─────────┬──────────D─┬─_part──────────────────┐ +│ 100000000 │ 100000000 │ 2023-07-08 │ 20230708_106_111_1_738 │ +└───────────┴───────────┴────────────┴────────────────────────┘ +1 row in set. Elapsed: 0.104 sec. 
Processed 8.19 thousand rows, 65.56 KB (79.11 thousand rows/s., 633.07 KB/s.) + +┌─S3GetObject─┬─S3PutObject─┬─ReadBufferFromS3─┬─WriteBufferFromS3─┐ +│ 6 │ 0 │ 70.58 KiB │ 0.00 B │ +└─────────────┴─────────────┴──────────────────┴───────────────────┘ +``` + +Select by primary key read only 70.58 KiB from S3 + +Size of this part + +```sql +SELECT + database, table, column, + formatReadableSize(sum(column_data_compressed_bytes) AS size) AS compressed +FROM system.parts_columns +WHERE (active = 1) AND (database LIKE '%') AND (table LIKE 'test_s3') AND (disk_name = 's3disk') + and name = '20230708_106_111_1_738' +GROUP BY database, table, column ORDER BY column ASC + +┌─database─┬─table───┬─column─┬─compressed─┐ +│ default │ test_s3 │ A │ 22.51 MiB │ +│ default │ test_s3 │ D │ 51.47 KiB │ +│ default │ test_s3 │ S │ 23.52 MiB │ +└──────────┴─────────┴────────┴────────────┘ +``` + +```sql +select * from test_s3 where S='100000000'; +┌─────────A─┬─S─────────┬──────────D─┐ +│ 100000000 │ 100000000 │ 2023-07-08 │ +└───────────┴───────────┴────────────┘ +1 row in set. Elapsed: 86.745 sec. Processed 700.00 million rows, 12.49 GB (8.07 million rows/s., 144.04 MB/s.) + +┌─S3GetObject─┬─S3PutObject─┬─ReadBufferFromS3─┬─WriteBufferFromS3─┐ +│ 947 │ 0 │ 2.36 GiB │ 0.00 B │ +└─────────────┴─────────────┴──────────────────┴───────────────────┘ +``` +Select using fullscan of S column read only 2.36 GiB from S3, the whole S column (2.33 GiB) plus parts of A and D. + + +``` + +delete from test_s3 where A=100000000; +0 rows in set. Elapsed: 17.429 sec. + +┌─q──┬─S3GetObject─┬─S3PutObject─┬─ReadBufferFromS3─┬─WriteBufferFromS3─┐ +│ Q3 │ 2981 │ 6 │ 23.06 MiB │ 27.25 KiB │ +└────┴─────────────┴─────────────┴──────────────────┴───────────────────┘ + +insert into test select 'Q3' q, event,value from system.events where event like '%S3%'; + + +delete from test_s3 where S='100000001'; +0 rows in set. Elapsed: 31.417 sec. 
+┌─q──┬─S3GetObject─┬─S3PutObject─┬─ReadBufferFromS3─┬─WriteBufferFromS3─┐ +│ Q4 │ 4209 │ 6 │ 2.39 GiB │ 27.25 KiB │ +└────┴─────────────┴─────────────┴──────────────────┴───────────────────┘ +insert into test select 'Q4' q, event,value from system.events where event like '%S3%'; + + + +alter table test_s3 delete where A=110000000 settings mutations_sync=2; +0 rows in set. Elapsed: 19.521 sec. + +┌─q──┬─S3GetObject─┬─S3PutObject─┬─ReadBufferFromS3─┬─WriteBufferFromS3─┐ +│ Q5 │ 2986 │ 15 │ 42.27 MiB │ 41.72 MiB │ +└────┴─────────────┴─────────────┴──────────────────┴───────────────────┘ +insert into test select 'Q5' q, event,value from system.events where event like '%S3%'; + + +alter table test_s3 delete where S='110000001' settings mutations_sync=2; +0 rows in set. Elapsed: 29.650 sec. + +┌─q──┬─S3GetObject─┬─S3PutObject─┬─ReadBufferFromS3─┬─WriteBufferFromS3─┐ +│ Q6 │ 4212 │ 15 │ 2.42 GiB │ 41.72 MiB │ +└────┴─────────────┴─────────────┴──────────────────┴───────────────────┘ +insert into test select 'Q6' q, event,value from system.events where event like '%S3%'; +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/s3_cache_example.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/s3_cache_example.md new file mode 100644 index 0000000000..e7db2700cc --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/s3_cache_example.md @@ -0,0 +1,318 @@ +--- +title: "Example of the table at s3 with cache" +linkTitle: "s3 cached table" +weight: 100 +description: >- + s3 disk and s3 cache. 
+--- + +## Storage configuration +```xml +cat /etc/clickhouse-server/config.d/s3.xml + + + + + s3 + https://s3.us-east-1.amazonaws.com/mybucket/test/s3cached/ + 1 + + + + cache + s3disk + /var/lib/clickhouse/disks/s3_cache/ + 50Gi + + + + + + + default + 50000000000 + + + cache + + + + + + + +``` + +```sql +select * from system.disks +┌─name────┬─path──────────────────────────────┬───────────free_space─┬──────────total_space─┬ +│ cache │ /var/lib/clickhouse/disks/s3disk/ │ 18446744073709551615 │ 18446744073709551615 │ +│ default │ /var/lib/clickhouse/ │ 149113987072 │ 207907635200 │ +│ s3disk │ /var/lib/clickhouse/disks/s3disk/ │ 18446744073709551615 │ 18446744073709551615 │ +└─────────┴───────────────────────────────────┴──────────────────────┴──────────────────────┴ + +select * from system.storage_policies; +┌─policy_name─┬─volume_name─┬─volume_priority─┬─disks───────┬─volume_type─┬─max_data_part_size─┬─move_factor─┬─prefer_not_to_merge─┐ +│ default │ default │ 1 │ ['default'] │ JBOD │ 0 │ 0 │ 0 │ +│ s3tiered │ default │ 1 │ ['default'] │ JBOD │ 50000000000 │ 0.1 │ 0 │ +│ s3tiered │ s3cached │ 2 │ ['s3disk'] │ JBOD │ 0 │ 0.1 │ 0 │ +└─────────────┴─────────────┴─────────────────┴─────────────┴─────────────┴────────────────────┴─────────────┴─────────────────────┘ +``` + +## example with a new table + +```sql +CREATE TABLE test_s3 +( + `A` Int64, + `S` String, + `D` Date +) +ENGINE = MergeTree +PARTITION BY D +ORDER BY A +SETTINGS storage_policy = 's3tiered'; + +insert into test_s3 select number, number, '2023-01-01' from numbers(1e9); + +0 rows in set. Elapsed: 270.285 sec. Processed 1.00 billion rows, 8.00 GB (3.70 million rows/s., 29.60 MB/s.) 
+``` + +Table size is 7.65 GiB and it at the default disk (EBS): +```sql +select disk_name, partition, sum(rows), formatReadableSize(sum(bytes_on_disk)) size, count() part_count +from system.parts where table= 'test_s3' and active +group by disk_name, partition; +┌─disk_name─┬─partition──┬──sum(rows)─┬─size─────┬─part_count─┐ +│ default │ 2023-01-01 │ 1000000000 │ 7.65 GiB │ 8 │ +└───────────┴────────────┴────────────┴──────────┴────────────┘ +``` + +It seems my EBS write speed is slower than S3 write speed: +```sql +alter table test_s3 move partition '2023-01-01' to volume 's3cached'; +0 rows in set. Elapsed: 98.979 sec. + +alter table test_s3 move partition '2023-01-01' to volume 'default'; +0 rows in set. Elapsed: 127.741 sec. +``` + +Queries performance against EBS: +```sql +select * from test_s3 where A = 443; +1 row in set. Elapsed: 0.002 sec. Processed 8.19 thousand rows, 71.64 KB (3.36 million rows/s., 29.40 MB/s.) + +select uniq(A) from test_s3; +1 row in set. Elapsed: 11.439 sec. Processed 1.00 billion rows, 8.00 GB (87.42 million rows/s., 699.33 MB/s.) + +select count() from test_s3 where S like '%4422%' +1 row in set. Elapsed: 17.484 sec. Processed 1.00 billion rows, 17.89 GB (57.20 million rows/s., 1.02 GB/s.) +``` + +Let's move data to S3 +```sql +alter table test_s3 move partition '2023-01-01' to volume 's3cached'; +0 rows in set. Elapsed: 81.068 sec. + +select disk_name, partition, sum(rows), formatReadableSize(sum(bytes_on_disk)) size, count() part_count +from system.parts where table= 'test_s3' and active +group by disk_name, partition; +┌─disk_name─┬─partition──┬──sum(rows)─┬─size─────┬─part_count─┐ +│ s3disk │ 2023-01-01 │ 1000000000 │ 7.65 GiB │ 8 │ +└───────────┴────────────┴────────────┴──────────┴────────────┘ +``` + +The first query execution against S3, the second against the cache (local EBS): +```sql +select * from test_s3 where A = 443; +1 row in set. Elapsed: 0.458 sec. 
Processed 8.19 thousand rows, 71.64 KB (17.88 thousand rows/s., 156.35 KB/s.) +1 row in set. Elapsed: 0.003 sec. Processed 8.19 thousand rows, 71.64 KB (3.24 million rows/s., 28.32 MB/s.) + +select uniq(A) from test_s3; +1 row in set. Elapsed: 26.601 sec. Processed 1.00 billion rows, 8.00 GB (37.59 million rows/s., 300.74 MB/s.) +1 row in set. Elapsed: 8.675 sec. Processed 1.00 billion rows, 8.00 GB (115.27 million rows/s., 922.15 MB/s.) + +select count() from test_s3 where S like '%4422%' +1 row in set. Elapsed: 33.586 sec. Processed 1.00 billion rows, 17.89 GB (29.77 million rows/s., 532.63 MB/s.) +1 row in set. Elapsed: 16.551 sec. Processed 1.00 billion rows, 17.89 GB (60.42 million rows/s., 1.08 GB/s.) +``` + +Cache introspection +```sql +select cache_base_path, formatReadableSize(sum(size)) from system.filesystem_cache group by 1; +┌─cache_base_path─────────────────────┬─formatReadableSize(sum(size))─┐ +│ /var/lib/clickhouse/disks/s3_cache/ │ 7.64 GiB │ +└─────────────────────────────────────┴───────────────────────────────┘ + +system drop FILESYSTEM cache; + +select cache_base_path, formatReadableSize(sum(size)) from system.filesystem_cache group by 1; +0 rows in set. Elapsed: 0.005 sec. + +select * from test_s3 where A = 443; +1 row in set. Elapsed: 0.221 sec. Processed 8.19 thousand rows, 71.64 KB (37.10 thousand rows/s., 324.47 KB/s.) + +select cache_base_path, formatReadableSize(sum(size)) from system.filesystem_cache group by 1; +┌─cache_base_path─────────────────────┬─formatReadableSize(sum(size))─┐ +│ /var/lib/clickhouse/disks/s3_cache/ │ 105.95 KiB │ +└─────────────────────────────────────┴───────────────────────────────┘ +``` + +No data is stored locally (except system log tables). 
+```sql +select name, formatReadableSize(free_space) free_space, formatReadableSize(total_space) total_space from system.disks; +┌─name────┬─free_space─┬─total_space─┐ +│ cache │ 16.00 EiB │ 16.00 EiB │ +│ default │ 48.97 GiB │ 49.09 GiB │ +│ s3disk │ 16.00 EiB │ 16.00 EiB │ +└─────────┴────────────┴─────────────┘ +``` + +## example with an existing table + +The `mydata` table is created without the explicitly defined `storage_policy`, it means that implicitly `storage_policy=default` / `volume=default` / `disk=default`. + +```sql +select disk_name, partition, sum(rows), formatReadableSize(sum(bytes_on_disk)) size, count() part_count +from system.parts where table='mydata' and active +group by disk_name, partition +order by partition; +┌─disk_name─┬─partition─┬─sum(rows)─┬─size───────┬─part_count─┐ +│ default │ 202201 │ 516666677 │ 4.01 GiB │ 13 │ +│ default │ 202202 │ 466666657 │ 3.64 GiB │ 13 │ +│ default │ 202203 │ 16666666 │ 138.36 MiB │ 10 │ +│ default │ 202301 │ 516666677 │ 4.01 GiB │ 10 │ +│ default │ 202302 │ 466666657 │ 3.64 GiB │ 10 │ +│ default │ 202303 │ 16666666 │ 138.36 MiB │ 10 │ +└───────────┴───────────┴───────────┴────────────┴────────────┘ + +-- Let's change the storage policy, this command is instant and changes only metadata of the table, and is possible because the new storage policy and the old one have the volume `default`. + +alter table mydata modify setting storage_policy = 's3tiered'; + +0 rows in set. Elapsed: 0.057 sec. +``` + +### straightforward (heavy) approach + +```sql +-- Let's add TTL, it's a heavy command and takes a lot of time and creates a performance impact, because it reads `D` column and moves parts to s3. +alter table mydata modify TTL D + interval 1 year to volume 's3cached'; + +0 rows in set. Elapsed: 140.661 sec.
+ +┌─disk_name─┬─partition─┬─sum(rows)─┬─size───────┬─part_count─┐ +│ s3disk │ 202201 │ 516666677 │ 4.01 GiB │ 13 │ +│ s3disk │ 202202 │ 466666657 │ 3.64 GiB │ 13 │ +│ s3disk │ 202203 │ 16666666 │ 138.36 MiB │ 10 │ +│ default │ 202301 │ 516666677 │ 4.01 GiB │ 10 │ +│ default │ 202302 │ 466666657 │ 3.64 GiB │ 10 │ +│ default │ 202303 │ 16666666 │ 138.36 MiB │ 10 │ +└───────────┴───────────┴───────────┴────────────┴────────────┘ +``` + +### gentle (manual) approach + +```sql +-- alter modify TTL changes only metadata of the table and applies only to newly inserted data. +set materialize_ttl_after_modify=0; +alter table mydata modify TTL D + interval 1 year to volume 's3cached'; +0 rows in set. Elapsed: 0.049 sec. + +-- move data slowly partition by partition + +alter table mydata move partition id '202201' to volume 's3cached'; +0 rows in set. Elapsed: 49.410 sec. + +alter table mydata move partition id '202202' to volume 's3cached'; +0 rows in set. Elapsed: 36.952 sec. + +alter table mydata move partition id '202203' to volume 's3cached'; +0 rows in set. Elapsed: 4.808 sec. + +-- data can be optimized to reduce number of parts before moving it to s3 +optimize table mydata partition id '202301' final; +0 rows in set. Elapsed: 66.551 sec. + +alter table mydata move partition id '202301' to volume 's3cached'; +0 rows in set. Elapsed: 33.332 sec. + +┌─disk_name─┬─partition─┬─sum(rows)─┬─size───────┬─part_count─┐ +│ s3disk │ 202201 │ 516666677 │ 4.01 GiB │ 13 │ +│ s3disk │ 202202 │ 466666657 │ 3.64 GiB │ 13 │ +│ s3disk │ 202203 │ 16666666 │ 138.36 MiB │ 10 │ +│ s3disk │ 202301 │ 516666677 │ 4.01 GiB │ 1 │ -- optimized partition +│ default │ 202302 │ 466666657 │ 3.64 GiB │ 13 │ +│ default │ 202303 │ 16666666 │ 138.36 MiB │ 10 │ +└───────────┴───────────┴───────────┴────────────┴────────────┘ +``` + +## S3 and ClickHouse® start time + +Let's create a table with 1000 parts and move them to s3.
+```sql +CREATE TABLE test_s3( A Int64, S String, D Date) +ENGINE = MergeTree PARTITION BY D ORDER BY A +SETTINGS storage_policy = 's3tiered'; + +insert into test_s3 select number, number, toDate('2000-01-01') + intDiv(number,1e6) from numbers(1e9); +optimize table test_s3 final settings optimize_skip_merged_partitions = 1; + +select disk_name, sum(rows), formatReadableSize(sum(bytes_on_disk)) size, count() part_count +from system.parts where table= 'test_s3' and active group by disk_name; +┌─disk_name─┬──sum(rows)─┬─size─────┬─part_count─┐ +│ default │ 1000000000 │ 7.64 GiB │ 1000 │ +└───────────┴────────────┴──────────┴────────────┘ + +alter table test_s3 modify ttl D + interval 1 year to disk 's3disk'; + +select disk_name, sum(rows), formatReadableSize(sum(bytes_on_disk)) size, count() part_count +from system.parts where table= 'test_s3' and active +group by disk_name; +┌─disk_name─┬─sum(rows)─┬─size─────┬─part_count─┐ +│ default │ 755000000 │ 5.77 GiB │ 755 │ +│ s3disk │ 245000000 │ 1.87 GiB │ 245 │ +└───────────┴───────────┴──────────┴────────────┘ + +---- several minutes later ---- + +┌─disk_name─┬──sum(rows)─┬─size─────┬─part_count─┐ +│ s3disk │ 1000000000 │ 7.64 GiB │ 1000 │ +└───────────┴────────────┴──────────┴────────────┘ +``` + +### start time + +```text +:) select name, value from system.merge_tree_settings where name = 'max_part_loading_threads'; +┌─name─────────────────────┬─value─────┐ +│ max_part_loading_threads │ 'auto(4)' │ +└──────────────────────────┴───────────┘ + +# systemctl stop clickhouse-server +# time systemctl start clickhouse-server / real 4m26.766s +# systemctl stop clickhouse-server +# time systemctl start clickhouse-server / real 4m24.263s + +# cat /etc/clickhouse-server/config.d/max_part_loading_threads.xml + + + + 128 + + + +# systemctl stop clickhouse-server +# time systemctl start clickhouse-server / real 0m11.225s +# systemctl stop clickhouse-server +# time systemctl start clickhouse-server / real 0m10.797s + + 256 + +# 
systemctl stop clickhouse-server +# time systemctl start clickhouse-server / real 0m8.474s +# systemctl stop clickhouse-server +# time systemctl start clickhouse-server / real 0m8.130s +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/s3disk.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/s3disk.md index 9c0ba5e40f..38168d365f 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/s3disk.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-s3-object-storage/s3disk.md @@ -27,7 +27,7 @@ description: >- * skip_access_check — if true, it's possible to use read only credentials with regular MergeTree table. But you would need to disable merges (`prefer_not_to_merge` setting) on s3 volume as well. -* send_metadata — if true, ClickHouse will populate s3 object with initial part & file path, which allow you to recover metadata from s3 and make debug easier. +* send_metadata — if true, ClickHouse® will populate s3 object with initial part & file path, which allow you to recover metadata from s3 and make debug easier. 
## Restore metadata from S3 @@ -62,6 +62,6 @@ source_path=vol1/ ## Links -https://altinity.com/blog/integrating-clickhouse-with-minio -https://altinity.com/blog/clickhouse-object-storage-performance-minio-vs-aws-s3 -https://altinity.com/blog/tips-for-high-performance-clickhouse-clusters-with-s3-object-storage +* https://altinity.com/blog/integrating-clickhouse-with-minio +* https://altinity.com/blog/clickhouse-object-storage-performance-minio-vs-aws-s3 +* https://altinity.com/blog/tips-for-high-performance-clickhouse-clusters-with-s3-object-storage diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files.md index 524f693a53..51a80362cb 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files.md @@ -1,24 +1,30 @@ --- -title: "Server config files" +title: "Server configuration files" linkTitle: "Server config files" description: > - How to manage server config files in Clickhouse + How to organize configuration files in ClickHouse® and how to manage changes +keywords: + - clickhouse config.xml + - clickhouse configuration +weight: 105 --- ## Сonfig management (recommended structure) -Clickhouse server config consists of two parts server settings (config.xml) and users settings (users.xml). +ClickHouse® server config consists of two parts server settings (config.xml) and users settings (users.xml). By default they are stored in the folder **/etc/clickhouse-server/** in two files config.xml & users.xml. -We suggest never change vendor config files and place your changes into separate .xml files in sub-folders. This way is easier to maintain and ease Clickhouse upgrades. +We suggest never change vendor config files and place your changes into separate .xml files in sub-folders. This way is easier to maintain and ease ClickHouse upgrades. 
-**/etc/clickhouse-server/users.d** – sub-folder for user settings. +**/etc/clickhouse-server/users.d** – sub-folder for [user settings](/altinity-kb-setup-and-maintenance/rbac/) (derived from `users.xml` filename). -**/etc/clickhouse-server/config.d** – sub-folder for server settings. +**/etc/clickhouse-server/config.d** – sub-folder for server settings (derived from `config.xml` filename). **/etc/clickhouse-server/conf.d** – sub-folder for any (both) settings. +If the root config (xml or yaml) has a different name, such as `keeper_config.xml` or `config_instance_66.xml`, then the `keeper_config.d` and `config_instance_66.d` folders will be used. But `conf.d` is always used and processed last. + File names of your xml files can be arbitrary but they are applied in alphabetical order. Examples: @@ -26,14 +32,14 @@ Examples: ```markup $ cat /etc/clickhouse-server/config.d/listen_host.xml - + :: - + $ cat /etc/clickhouse-server/config.d/macros.xml - + test host22 @@ -41,11 +47,11 @@ $ cat /etc/clickhouse-server/config.d/macros.xml 41295 host22.server.com - + cat /etc/clickhouse-server/config.d/zoo.xml - + localhost @@ -55,31 +61,31 @@ cat /etc/clickhouse-server/config.d/zoo.xml /clickhouse/test/task_queue/ddl - + cat /etc/clickhouse-server/users.d/enable_access_management_for_user_default.xml - + 1 - + cat /etc/clickhouse-server/users.d/memory_usage.xml - + 25290221568 50580443136 - + ``` -BTW, you can define any macro in your configuration and use them in Zookeeper paths +BTW, you can define any macro in your configuration and use them in [Zookeeper](https://docs.altinity.com/operationsguide/clickhouse-zookeeper/zookeeper-installation/) paths ```xml ReplicatedMergeTree('/clickhouse/{cluster}/tables/my_table','{replica}') @@ -101,23 +107,23 @@ Example how to delete **tcp_port** & **http_port** defined on higher level in th ```markup cat /etc/clickhouse-server/config.d/disable_open_network.xml - + - + ``` Example how to replace **remote_servers** section defined on 
higher level in the main config.xml (it allows to remove default test clusters. ```markup - + .... - + ``` ## Settings & restart @@ -132,9 +138,9 @@ But there are **exceptions** from those rules (see below). * `` * `` -* `` -* `` -* `` +* `` (since 19.12) +* `` (since 19.12) +* `` (since 21.11, also for versions older than v24 system tables are not updated with the new config values) * `` * `` * `` @@ -142,7 +148,7 @@ But there are **exceptions** from those rules (see below). * `` * `` * `` (but reconnect don't happen automatically) -* `` +* `` -- only if you add a new entity (disk/volume/policy), to modify these enitities restart is mandatory. * `` * `` * `` @@ -177,7 +183,7 @@ The list of user setting which require server restart: See also `select * from system.settings where description ilike '%start%'` -Also there are several 'long-running' user sessions which are almost never restarted and can keep the setting from the server start (it's DDLWorker, Kafka, and some other service things). +Also there are several 'long-running' user sessions which are almost never restarted and can keep the setting from the server start (it's DDLWorker, [Kafka](https://altinity.com/blog/kafka-engine-the-story-continues), and some other service things). ## Dictionaries @@ -201,13 +207,13 @@ and add to the configuration ```markup $ cat /etc/clickhouse-server/config.d/dictionaries.xml - + dict/*.xml true - + ``` -**dict/\*.xml** – relative path, servers seeks files in the folder **/etc/clickhouse-server/dict**. More info in [Multiple Clickhouse instances](altinity-kb-server-config-files.md#Multiple-Clickhouse-instances). +**dict/\*.xml** – relative path, servers seeks files in the folder **/etc/clickhouse-server/dict**. More info in [Multiple ClickHouse instances](#Multiple-ClickHouse-instances-at-one-host). 
## incl attribute & metrica.xml @@ -220,7 +226,7 @@ For example to avoid repetition of user/password for each dictionary you can cre ```markup $ cat /etc/clickhouse-server/dict_sources.xml - + 3306 user @@ -231,7 +237,7 @@ $ cat /etc/clickhouse-server/dict_sources.xml my_database - + ``` Include this file: @@ -239,10 +245,10 @@ Include this file: ```markup $ cat /etc/clickhouse-server/config.d/dictionaries.xml - + ... /etc/clickhouse-server/dict_sources.xml - + ``` And use in dictionary descriptions (**incl="mysql_config"**): @@ -264,16 +270,16 @@ $ cat /etc/clickhouse-server/dict/country.xml ``` -## Multiple Clickhouse instances at one host +## Multiple ClickHouse instances at one host -By default Clickhouse server configs are in **/etc/clickhouse-server/** because clickhouse-server runs with a parameter **--config-file /etc/clickhouse-server/config.xml** +By default ClickHouse server configs are in **/etc/clickhouse-server/** because clickhouse-server runs with a parameter **--config-file /etc/clickhouse-server/config.xml** **config-file** is defined in startup scripts: * **/etc/init.d/clickhouse-server** – init-V * **/etc/systemd/system/clickhouse-server.service** – systemd -Clickhouse uses the path from **config-file** parameter as base folder and seeks for other configs by relative path. All sub-folders **users.d / config.d** are relative. +ClickHouse uses the path from **config-file** parameter as base folder and seeks for other configs by relative path. All sub-folders **users.d / config.d** are relative. You can start multiple **clickhouse-server** each with own **--config-file.** @@ -318,10 +324,10 @@ By default ClickHouse uses **/var/lib/clickhouse/**. It can be overridden in pat ## preprocessed_configs -Clickhouse server watches config files and folders. When you change, add or remove XML files Clickhouse immediately assembles XML files into a combined file. These combined files are stored in **/var/lib/clickhouse/preprocessed_configs/** folders. 
+ClickHouse server watches config files and folders. When you change, add or remove XML files ClickHouse immediately assembles XML files into a combined file. These combined files are stored in **/var/lib/clickhouse/preprocessed_configs/** folders. You can verify that your changes are valid by checking **/var/lib/clickhouse/preprocessed_configs/config.xml**, **/var/lib/clickhouse/preprocessed_configs/users.xml**. If something wrong with with your settings e.g. unclosed XML element or typo you can see alerts about this mistakes in **/var/log/clickhouse-server/clickhouse-server.log** -If you see your changes in **preprocessed_configs** it does not mean that changes are applied on running server, check [Settings & restart](altinity-kb-server-config-files.md#Settings-%26--restart) +If you see your changes in **preprocessed_configs** it does not mean that changes are applied on running server, check Settings and restart. diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-settings-to-adjust.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-settings-to-adjust.md index 00e2e6b367..243762e137 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-settings-to-adjust.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-settings-to-adjust.md @@ -8,7 +8,7 @@ description: > ```markup cat /etc/clickhouse-server/config.d/query_log.xml - + system
query_log
@@ -21,7 +21,7 @@ description: > SETTINGS ttl_only_drop_parts=1 -
+ ``` 2. `query_thread_log` - typically is not too useful for end users, you can disable it (or set up TTL). @@ -29,23 +29,23 @@ description: > ```markup $ cat /etc/clickhouse-server/users.d/z_log_queries.xml - + 0 - + ``` -2. If you have a good monitoring outside ClickHouse you don't need to store the history of metrics in ClickHouse +2. If you have a good monitoring outside ClickHouse® you don't need to store the history of metrics in ClickHouse ```markup cat /etc/clickhouse-server/config.d/disable_metric_logs.xml - + - + ``` @@ -53,7 +53,7 @@ description: > ```markup cat /etc/clickhouse-server/config.d/part_log.xml - + system part_log
@@ -66,34 +66,34 @@ description: > SETTINGS ttl_only_drop_parts=1
-
+ ``` 4. on older versions `log_queries` is disabled by default, it's worth having it enabled always. ```markup $ cat /etc/clickhouse-server/users.d/log_queries.xml - + 1 - + ``` 5. quite often you want to have on-disk group by / order by enabled (both disabled by default). ```markup cat /etc/clickhouse-server/users.d/enable_on_disk_operations.xml - + 2000000000 2000000000 - + ``` 6. quite often you want to create more users with different limitations. @@ -105,9 +105,29 @@ description: > You can find the preset example [here](https://clickhouse.tech/docs/en/operations/settings/settings-profiles/). Also, force_index_by_date + force_primary_key can be a nice idea to avoid queries that 'accidentally' do full scans, max_concurrent_queries_for_user -7. merge_tree settings: `max_bytes_to_merge_at_max_space_in_pool` (may be reduced in some scenarios), `fsync_*` , `inactive_parts_to_throw_insert` - can be enabled, `replicated_deduplication_window` - can be extended if single insert create lot of parts , `merge_with_ttl_timeout` - when you use ttl -8. settings `default_database_engine` / `insert_distributed_sync` / `fsync_metadata` / `do_not_merge_across_partitions_select_final` / fsync -9. memory usage per server / query / user: [memory configuration settings](altinity-kb-memory-configuration-settings.md) +7. merge_tree settings: `max_bytes_to_merge_at_max_space_in_pool` (may be reduced in some scenarios), `inactive_parts_to_throw_insert` - can be enabled, `replicated_deduplication_window` - can be extended if single insert create lot of parts , `merge_with_ttl_timeout` - when you use ttl + +8. `insert_distributed_sync` - for small clusters you may sometimes want to enable it +9. when the durability is the main requirement (or server / storage is not stable) - you may want to enable `fsync_*` setting (impacts the write performance significantly!!), and `insert_quorum` + +11. 
If you use FINAL queries - usually you want to enable `do_not_merge_across_partitions_select_final` + +12. memory usage per server / query / user: [memory configuration settings](/altinity-kb-setup-and-maintenance/altinity-kb-memory-configuration-settings/) + +13. if you use async_inserts - you often may want to increase max_concurrent_queries + +``` + + 500 + 400 + 100 + +``` + +14. materialize_ttl_after_modify=0 +15. access_management=1 +16. secret in + See also: diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-shutting-down-a-node.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-shutting-down-a-node.md index 56d24e0c49..8118b11c0c 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-shutting-down-a-node.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-shutting-down-a-node.md @@ -9,6 +9,7 @@ It’s possible to shutdown server on fly, but that would lead to failure of some More safer way: * Remove server (which is going to be disabled) from remote_server section of config.xml on all servers. + * avoid removing the last replica of the shard (that can lead to incorrect data placement if you use non-random distribution) * Remove server from load balancer, so new queries wouldn’t hit it. * Detach Kafka / Rabbit / Buffer tables (if used), and Materialized* databases. * Wait until all already running queries would finish execution on it. 
diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-system-tables-eat-my-disk.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-system-tables-eat-my-disk.md index a4f6e5cd0a..ae53835366 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-system-tables-eat-my-disk.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-system-tables-eat-my-disk.md @@ -1,18 +1,20 @@ --- -title: "System tables eat my disk" -linkTitle: "System tables eat my disk" +title: "System tables ate my disk" +linkTitle: "Regulating the size of System tables" description: > - System tables eat my disk + When the ClickHouse® SYSTEM database gets out of hand +keywords: + - clickhouse system tables --- > **Note 1:** System database stores virtual tables (**parts**, **tables,** **columns, etc.**) and \***_log** tables. > -> Virtual tables do not persist on disk. They reflect ClickHouse memory (c++ structures). They cannot be changed or removed. +> Virtual tables do not persist on disk. They reflect ClickHouse® memory (c++ structures). They cannot be changed or removed. > -> Log tables are named with postfix \***_log** and have the MergeTree engine. Clickhouse does not use information stored in these tables, this data is for you only. +> Log tables are named with postfix \***_log** and have the [MergeTree engine](/engines/mergetree-table-engine-family/). ClickHouse does not use information stored in these tables, this data is for you only. > > You can drop / rename / truncate \***_log** tables at any time. ClickHouse will recreate them in about 7 seconds (flush period). -> **Note 2:** Log tables with numeric postfixes (_1 / 2 / 3 ...) `query_log_1 query_thread_log_3` are results of Clickhouse upgrades. When a new version of Clickhouse starts and discovers that a system log table's schema is incompatible with a new schema, then Clickhouse renames the old *_log table to the name with the prefix and creates a table with the new schema. 
You can drop such tables if you don't need such historic data. +> **Note 2:** Log tables with numeric postfixes (_1 / 2 / 3 ...) `query_log_1 query_thread_log_3` are results of [ClickHouse upgrades](https://altinity.com/clickhouse-upgrade-overview/) (or other changes of schemas of these tables). When a new version of ClickHouse starts and discovers that a system log table's schema is incompatible with a new schema, then ClickHouse renames the old *_log table to the name with the prefix and creates a table with the new schema. You can drop such tables if you don't need such historic data. ## You can disable all / any of them @@ -21,9 +23,12 @@ Do not create log tables at all (a restart is needed for these changes to take e ```markup $ cat /etc/clickhouse-server/config.d/z_log_disable.xml - + + + + @@ -34,21 +39,24 @@ $ cat /etc/clickhouse-server/config.d/z_log_disable.xml - + + + ``` **We do not recommend removing `query_log` and `query_thread_log` as queries' (they have very useful information for debugging), and logging can be easily turned off without a restart through user profiles:** ```markup $ cat /etc/clickhouse-server/users.d/z_log_queries.xml - + 0 0 + 0 - + ``` Hint: `z_log_disable.xml` is named with **z_** in the beginning, it means this config will be applied the last and will override all other config files with these sections (config are applied in alphabetical order). @@ -73,8 +81,8 @@ Example for `query_log`. It drops partitions with data older than 14 days: ```markup $ cat /etc/clickhouse-server/config.d/query_log_ttl.xml - - + + system query_log
ENGINE = MergeTree PARTITION BY (event_date) @@ -83,10 +91,10 @@ $ cat /etc/clickhouse-server/config.d/query_log_ttl.xml 7500
-
+ ``` -After that you need to restart ClickHouse and drop or rename the existing system.query_log table, then CH creates a new table with these settings. +After that you need to restart ClickHouse and *if using old clickhouse versions like 20 or less*, drop or rename the existing system.query_log table and then CH creates a new table with these settings. This is automatically done in newer versions 21+. ```sql RENAME TABLE system.query_log TO system.query_log_1; @@ -96,7 +104,7 @@ Important part here is a daily partitioning `PARTITION BY (event_date)` in this Usual TTL processing (when table partitioned by toYYYYMM and TTL by day) is heavy CPU / Disk I/O consuming operation which re-writes data parts without expired rows. -You can add TTL without ClickHouse restart (and table dropping or renaming): +You can [add TTL without ClickHouse restart](/altinity-kb-queries-and-syntax/ttl/modify-ttl/) (and table dropping or renaming): ```sql ALTER TABLE system.query_log MODIFY TTL event_date + INTERVAL 14 DAY; @@ -111,16 +119,18 @@ This way just adds TTL to a table and leaves monthly (default) partitioning (wil ```markup $ cat /etc/clickhouse-server/config.d/query_log_ttl.xml - + system query_log
event_date + INTERVAL 30 DAY DELETE
-
+ ``` +💡 For the [clickhouse-operator](https://github.com/Altinity/clickhouse-operator/blob/master/README.md), the above method of using only the `` tag without `` or `` is recommended, because of possible configuration clashes. + +After that you need to restart ClickHouse and *if using old clickhouse versions like 20 or less*, drop or rename the existing system.query_log table and then CH creates a new table with these settings. This is automatically done in newer versions 21+. -After that you need to restart ClickHouse and drop or rename the existing system.query_log table, then CH creates a new table with this TTL setting. ## You can disable logging on a session level or in user’s profile (for all or specific users) @@ -132,12 +142,13 @@ Let’s disable query logging for all users (profile = default, all other profil ```markup cat /etc/clickhouse-server/users.d/log_queries.xml - + 0 0 + 0 - + ``` diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-who-ate-my-memory.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-who-ate-my-memory.md index cc2ac0c2a4..da58c80b7e 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-who-ate-my-memory.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-who-ate-my-memory.md @@ -1,10 +1,57 @@ --- -title: "Who ate my memory" -linkTitle: "Who ate my memory" +title: "Who ate my ClickHouse® memory?" +linkTitle: "Who ate my memory?" 
description: > - Who ate my memory + *"It was here a few minutes ago..."* +keywords: + - clickhouse memory + - clickhouse memory usage --- + +``` +SYSTEM JEMALLOC PURGE; + +SELECT 'OS' as group, metric as name, toInt64(value) as val FROM system.asynchronous_metrics WHERE metric like 'OSMemory%' + UNION ALL +SELECT 'Caches' as group, metric as name, toInt64(value) FROM system.asynchronous_metrics WHERE metric LIKE '%CacheBytes' + UNION ALL +SELECT 'MMaps' as group, metric as name, toInt64(value) FROM system.metrics WHERE metric LIKE 'MMappedFileBytes' + UNION ALL +SELECT 'Process' as group, metric as name, toInt64(value) FROM system.asynchronous_metrics WHERE metric LIKE 'Memory%' + UNION ALL +SELECT 'MemoryTable', engine as name, toInt64(sum(total_bytes)) FROM system.tables WHERE engine IN ('Join','Memory','Buffer','Set') GROUP BY engine + UNION ALL +SELECT 'StorageBuffer' as group, metric as name, toInt64(value) FROM system.metrics WHERE metric='StorageBufferBytes' + UNION ALL +SELECT 'Queries' as group, left(query,7) as name, toInt64(sum(memory_usage)) FROM system.processes GROUP BY name + UNION ALL +SELECT 'Dictionaries' as group, type as name, toInt64(sum(bytes_allocated)) FROM system.dictionaries GROUP BY name + UNION ALL +SELECT 'PrimaryKeys' as group, 'db:'||database as name, toInt64(sum(primary_key_bytes_in_memory_allocated)) FROM system.parts GROUP BY name + UNION ALL +SELECT 'Merges' as group, 'db:'||database as name, toInt64(sum(memory_usage)) FROM system.merges GROUP BY name + UNION ALL +SELECT 'InMemoryParts' as group, 'db:'||database as name, toInt64(sum(data_uncompressed_bytes)) FROM system.parts WHERE part_type = 'InMemory' GROUP BY name + UNION ALL +SELECT 'AsyncInserts' as group, 'db:'||database as name, toInt64(sum(total_bytes)) FROM system.asynchronous_inserts GROUP BY name + UNION ALL +SELECT 'FileBuffersVirtual' as group, metric as name, toInt64(value * 2*1024*1024) FROM system.metrics WHERE metric like 'OpenFileFor%' + UNION ALL +SELECT 
'ThreadStacksVirtual' as group, metric as name, toInt64(value * 8*1024*1024) FROM system.metrics WHERE metric = 'GlobalThread' + UNION ALL +SELECT 'UserMemoryTracking' as group, user as name, toInt64(memory_usage) FROM system.user_processes + UNION ALL +select 'QueryCacheBytes' as group, '', toInt64(sum(result_size)) FROM system.query_cache + UNION ALL +SELECT 'MemoryTracking' as group, 'total' as name, toInt64(value) FROM system.metrics WHERE metric = 'MemoryTracking' +``` + ```sql +SELECT *, formatReadableSize(value) +FROM system.metrics +WHERE (metric ilike '%Cach%' or metric ilike '%Mem%') and value != 0 +order by metric format PrettyCompactMonoBlock; + SELECT *, formatReadableSize(value) FROM system.asynchronous_metrics WHERE metric like '%Cach%' or metric like '%Mem%' @@ -34,6 +81,8 @@ SELECT formatReadableSize(sum(memory_usage)) FROM system.merges; SELECT formatReadableSize(sum(memory_usage)) FROM system.processes; +select formatReadableSize(sum(result_size)) FROM system.query_cache; + SELECT initial_query_id, elapsed, @@ -88,3 +137,109 @@ for i in `seq 1 600`; do clickhouse-client --empty_result_for_aggregation_by_emp 0.00 B 0.00 B 21.36 MiB 1.58 GiB 911.07 MiB ``` + +## retrospection analysis of the RAM usage based on query_log and part_log (shows peaks) + +```sql +WITH + now() - INTERVAL 24 HOUR AS min_time, -- you can adjust that + now() AS max_time, -- you can adjust that + INTERVAL 1 HOUR as time_frame_size +SELECT + toStartOfInterval(event_timestamp, time_frame_size) as timeframe, + formatReadableSize(max(mem_overall)) as peak_ram, + formatReadableSize(maxIf(mem_by_type, event_type='Insert')) as inserts_ram, + formatReadableSize(maxIf(mem_by_type, event_type='Select')) as selects_ram, + formatReadableSize(maxIf(mem_by_type, event_type='MergeParts')) as merge_ram, + formatReadableSize(maxIf(mem_by_type, event_type='MutatePart')) as mutate_ram, + formatReadableSize(maxIf(mem_by_type, event_type='Alter')) as alter_ram, + 
formatReadableSize(maxIf(mem_by_type, event_type='Create')) as create_ram, + formatReadableSize(maxIf(mem_by_type, event_type not IN ('Insert', 'Select', 'MergeParts','MutatePart', 'Alter', 'Create') )) as other_types_ram, + groupUniqArrayIf(event_type, event_type not IN ('Insert', 'Select', 'MergeParts','MutatePart', 'Alter', 'Create') ) as other_types +FROM ( + SELECT + toDateTime( toUInt32(ts) ) as event_timestamp, + t as event_type, + SUM(mem) OVER (PARTITION BY t ORDER BY ts) as mem_by_type, + SUM(mem) OVER (ORDER BY ts) as mem_overall + FROM + ( + WITH arrayJoin([(toFloat64(event_time_microseconds) - (duration_ms / 1000), toInt64(peak_memory_usage)), (toFloat64(event_time_microseconds), -peak_memory_usage)]) AS data + SELECT + CAST(event_type,'LowCardinality(String)') as t, + data.1 as ts, + data.2 as mem + FROM system.part_log + WHERE event_time BETWEEN min_time AND max_time AND peak_memory_usage != 0 + + UNION ALL + + WITH arrayJoin([(toFloat64(query_start_time_microseconds), toInt64(memory_usage)), (toFloat64(event_time_microseconds), -memory_usage)]) AS data + SELECT + query_kind, + data.1 as ts, + data.2 as mem + FROM system.query_log + WHERE event_time BETWEEN min_time AND max_time AND memory_usage != 0 + + UNION ALL + + WITH + arrayJoin([(toFloat64(event_time_microseconds) - (view_duration_ms / 1000), toInt64(peak_memory_usage)), (toFloat64(event_time_microseconds), -peak_memory_usage)]) AS data + SELECT + CAST(toString(view_type)||'View','LowCardinality(String)') as t, + data.1 as ts, + data.2 as mem + FROM system.query_views_log + WHERE event_time BETWEEN min_time AND max_time AND peak_memory_usage != 0 +) +) +GROUP BY timeframe +ORDER BY timeframe +FORMAT PrettyCompactMonoBlock; +``` + +## retrospection analysis of trace_log + +```sql +WITH + now() - INTERVAL 24 HOUR AS min_time, -- you can adjust that + now() AS max_time -- you can adjust that +SELECT + trace_type, + count(), + topK(20)(query_id) +FROM system.trace_log +WHERE event_time BETWEEN 
min_time AND max_time +GROUP BY trace_type; + +SELECT + t, + count() AS queries, + formatReadableSize(sum(peak_size)) AS sum_of_peaks, + formatReadableSize(max(peak_size)) AS biggest_query_peak, + argMax(query_id, peak_size) AS query +FROM +( + SELECT + toStartOfInterval(event_time, toIntervalMinute(5)) AS t, + query_id, + max(size) AS peak_size + FROM system.trace_log + WHERE (trace_type = 'MemoryPeak') AND (event_time > (now() - toIntervalHour(24))) + GROUP BY + t, + query_id +) +GROUP BY t +ORDER BY t ASC; + +-- later on you can check particular query_ids in query_log +``` + +## analysis of the server text logs + +``` +grep MemoryTracker /var/log/clickhouse-server.log +zgrep MemoryTracker /var/log/clickhouse-server.log.*.gz +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/_index.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/_index.md index 9f33f3b125..39643eab13 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/_index.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/_index.md @@ -17,7 +17,7 @@ TLDR version: 6) [monitor zookeeper](https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/zookeeper-monitoring/). Side note: -in many cases, the slowness of the zookeeper is actually a symptom of some issue with clickhouse schema/usage pattern (the most typical issues: an enormous number of partitions/tables/databases with real-time inserts, tiny & frequent inserts). +in many cases, the slowness of the zookeeper is actually a symptom of some issue with ClickHouse® schema/usage pattern (the most typical issues: an enormous number of partitions/tables/databases with real-time inserts, tiny & frequent inserts). 
### How to install @@ -61,11 +61,9 @@ zk_sum_follower_sync_time 0 [https://github.com/apache/zookeeper/blob/master/zookeeper-docs/src/main/resources/markdown/zookeeperTools.md](https://github.com/apache/zookeeper/blob/master/zookeeper-docs/src/main/resources/markdown/zookeeperTools.md) -## Alternatives for zkCli +## Alternative for zkCli * [https://github.com/go-zkcli/zkcli](https://github.com/go-zkcli/zkcli) -* [https://github.com/outbrain/zookeepercli](https://github.com/outbrain/zookeepercli) -* [https://idata.co.il/2018/07/a-day-at-the-zoo-graphic-uis-for-apache-zookeeper/](https://idata.co.il/2018/07/a-day-at-the-zoo-graphic-uis-for-apache-zookeeper/) ## Web UI diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-proper-setup.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-proper-setup.md index 6644f215a9..df65037097 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-proper-setup.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-proper-setup.md @@ -20,7 +20,7 @@ TLDR version: 6) monitor zookeeper. Side note: -in many cases, the slowness of the zookeeper is actually a symptom of some issue with clickhouse schema/usage pattern (the most typical issues: an enormous number of partitions/tables/databases with real-time inserts, tiny & frequent inserts). +in many cases, the slowness of the zookeeper is actually a symptom of some issue with ClickHouse® schema/usage pattern (the most typical issues: an enormous number of partitions/tables/databases with real-time inserts, tiny & frequent inserts). 
Some doc about that subject: diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-recovering-from-complete-metadata-loss-in-zookeeper.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-recovering-from-complete-metadata-loss-in-zookeeper.md index de64acd474..8f7507d9c7 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-recovering-from-complete-metadata-loss-in-zookeeper.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-recovering-from-complete-metadata-loss-in-zookeeper.md @@ -6,7 +6,7 @@ description: > --- ## Problem -Every ClickHouse user experienced a loss of ZooKeeper one day. While the data is available and replicas respond to queries, inserts are no longer possible. ClickHouse uses ZooKeeper in order to store the reference version of the table structure and part of data, and when it is not available can not guarantee data consistency anymore. Replicated tables turn to the read-only mode. In this article we describe step-by-step instructions of how to restore ZooKeeper metadata and bring ClickHouse cluster back to normal operation. +Every ClickHouse® user experienced a loss of ZooKeeper one day. While the data is available and replicas respond to queries, inserts are no longer possible. ClickHouse uses ZooKeeper in order to store the reference version of the table structure and part of data, and when it is not available can not guarantee data consistency anymore. Replicated tables turn to the read-only mode. In this article we describe step-by-step instructions of how to restore ZooKeeper metadata and bring ClickHouse cluster back to normal operation. In order to restore ZooKeeper we have to solve two tasks. First, we need to restore table metadata in ZooKeeper. Currently, the only way to do it is to recreate the table with the `CREATE TABLE DDL` statement. 
@@ -14,7 +14,7 @@ In order to restore ZooKeeper we have to solve two tasks. First, we need to rest CREATE TABLE table_name ... ENGINE=ReplicatedMergeTree('zookeeper_path','replica_name'); ``` -The second and more difficult task is to populate zookeeper with information of clickhouse data parts. As mentioned above, ClickHouse stores the reference data about all parts of replicated tables in ZooKeeper, so we have to traverse all partitions and re-attach them to the recovered replicated table in order to fix that. +The second and more difficult task is to populate zookeeper with information of ClickHouse data parts. As mentioned above, ClickHouse stores the reference data about all parts of replicated tables in ZooKeeper, so we have to traverse all partitions and re-attach them to the recovered replicated table in order to fix that. {{% alert title="Info" color="info" %}} Starting from ClickHouse version 21.7 there is SYSTEM RESTORE REPLICA command @@ -52,7 +52,7 @@ Now let’s remove metadata in zookeeper using `ZkCli.sh` at ZooKeeper host: deleteall /clickhouse/cluster_1/tables/01/table_repl ``` -And try to resync clickhouse replica state with zookeeper: +And try to resync ClickHouse replica state with zookeeper: ```sql SYSTEM RESTART REPLICA table_repl; diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-zookeeper-backup.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-zookeeper-backup.md index b37f0c8799..04db08384a 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-zookeeper-backup.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-zookeeper-backup.md @@ -4,30 +4,19 @@ linkTitle: "ZooKeeper backup" description: > ZooKeeper backup --- -You may have a question: “Do I need to backup Zookeeper Database, because it’s pretty important for ClickHouse?” -Answer: _ZK is in memory database. 
All nodes of ZK has exactly the same data._ +Question: Do I need to backup Zookeeper Database, because it’s pretty important for ClickHouse®? -_If you have 3 ZK servers, then you have 3 copies of (3 backups) already._ +TLDR answer: **NO, just backup ClickHouse data itself, and do SYSTEM RESTORE REPLICA during recovery to recreate zookeeper data** -_To backup ZK has no sense because you need to have a snapshot of ZK + last ZK logs to exactly the last ZK transaction._ +Details: -_You cannot use ZK database backed up 3 hours ago or 3 minutes ago._ +Zookeeper does not store any data, it stores the STATE of the distributed system ("that replica have those parts", "still need 2 merges to do", "alter is being applied" etc). That state always changes, and you can not capture / backup / and recover that state in a safe manner. So even backup from few seconds ago is representing some 'old state from the past' which is INCONSISTENT with actual state of the data. -_ZK restored from the backup will be inconsistent with CH database._ +In other words - if ClickHouse is working - then the state of distributed system always changes, and it's almost impossible to collect the current state of zookeeper (while you collecting it it will change many times). The only exception is 'stop-the-world' scenario - i.e. shutdown all ClickHouse nodes, with all other zookeeper clients, then shutdown all the zookeeper, and only then take the backups, in that scenario and backups of zookeeper & ClickHouse will be consistent. In that case restoring the backup is as simple (and is equal to) as starting all the nodes which was stopped before. But usually that scenario is very non-practical because it requires huge downtime. -_Answer2: Usually, it doesn't have too much sense. It's very hard to take zookeeper snapshot at exactly the same state as clickhouse. (well maybe if you will turn of clickhouses, then you can take snapshots of clickhouse AND zookeepers). 
So for example on clouds if you can stop all nodes and take disk snapshots - it will just work._ +So what to do instead? It's enough if you will backup ClickHouse data itself, and to recover the state of zookeeper you can just run the `SYSTEM RESTORE REPLICA` command **AFTER** restoring the ClickHouse data itself. That will recreate the state of the replica in the zookeeper as it exists on the filesystem after backup recovery. -_But while clickhouse is working it's almost impossible to collect the current state of zookeeper._ +Normally a Zookeeper ensemble consists of 3 nodes, which is enough to survive hardware failures. -_You need to restore zookeeper and clickhouse snapshots from EXACTLY THE SAME moment of time - no procedure is needed. Just start & run._ - -_Also, that allows only to snapshot of clickhouse & zookeeper as a whole. You can not do partial backups then._ - -_If you lose zookeeper data while having clickhouse data (or backups of clickhouse data) - you can restore the zookeeper state from clickhouse state._ - -_With a couple of tables, it can be done manually._ - -_On scale, you can use_ [https://github.com/Altinity/clickhouse-zookeeper-recovery](https://github.com/Altinity/clickhouse-zookeeper-recovery) - -_In future it will be even simpler_ [https://github.com/ClickHouse/ClickHouse/pull/13652](https://github.com/ClickHouse/ClickHouse/pull/13652) +On older versions (which don't have the `SYSTEM RESTORE REPLICA` command) it can be done manually, using the instructions at https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#converting-from-mergetree-to-replicatedmergetree; on scale you can try [https://github.com/Altinity/clickhouse-zookeeper-recovery](https://github.com/Altinity/clickhouse-zookeeper-recovery) diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/clickhouse-keeper-initd.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/clickhouse-keeper-initd.md new file mode
100644 index 0000000000..39787ff73c --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/clickhouse-keeper-initd.md @@ -0,0 +1,123 @@ +--- +title: "clickhouse-keeper-initd" +linkTitle: "clickhouse-keeper-initd" +weight: 100 +description: >- + clickhouse-keeper-initd +--- + +## clickhouse-keeper-initd + +An init.d script for clickhouse-keeper. +This example is based on zkServer.sh +```bash +#!/bin/bash +### BEGIN INIT INFO +# Provides: clickhouse-keeper +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Required-Start: +# Required-Stop: +# Short-Description: Start keeper daemon +# Description: Start keeper daemon +### END INIT INFO + +NAME=clickhouse-keeper +ZOOCFGDIR=/etc/$NAME +ZOOCFG="$ZOOCFGDIR/keeper.xml" +ZOO_LOG_DIR=/var/log/$NAME +USER=clickhouse +GROUP=clickhouse +ZOOPIDDIR=/var/run/$NAME +ZOOPIDFILE=$ZOOPIDDIR/$NAME.pid +SCRIPTNAME=/etc/init.d/$NAME + +#echo "Using config: $ZOOCFG" >&2 +ZOOCMD="clickhouse-keeper -C ${ZOOCFG} start --daemon" + +# ensure PIDDIR exists, otw stop will fail +mkdir -p "$(dirname "$ZOOPIDFILE")" + +if [ ! -w "$ZOO_LOG_DIR" ] ; then +mkdir -p "$ZOO_LOG_DIR" +fi + +case $1 in +start) + echo -n "Starting keeper ... " + if [ -f "$ZOOPIDFILE" ]; then + if kill -0 `cat "$ZOOPIDFILE"` > /dev/null 2>&1; then + echo already running as process `cat "$ZOOPIDFILE"`. + exit 0 + fi + fi + sudo -u clickhouse `echo "$ZOOCMD"` + if [ $? -eq 0 ] + then + pgrep -f "$ZOOCMD" > "$ZOOPIDFILE" + echo "PID:" `cat $ZOOPIDFILE` + if [ $? -eq 0 ]; + then + sleep 1 + echo STARTED + else + echo FAILED TO WRITE PID + exit 1 + fi + else + echo SERVER DID NOT START + exit 1 + fi + ;; +start-foreground) + sudo -u clickhouse clickhouse-keeper -C "$ZOOCFG" start + ;; +print-cmd) + echo "sudo -u clickhouse ${ZOOCMD}" + ;; +stop) + echo -n "Stopping keeper ... " + if [ ! 
-f "$ZOOPIDFILE" ] + then + echo "no keeper to stop (could not find file $ZOOPIDFILE)" + else + ZOOPID=$(cat "$ZOOPIDFILE") + echo $ZOOPID + kill $ZOOPID + while true; do + sleep 3 + if kill -0 $ZOOPID > /dev/null 2>&1; then + echo $ZOOPID is still running + else + break + fi + done + rm "$ZOOPIDFILE" + echo STOPPED + fi + exit 0 + ;; +restart) + shift + "$0" stop ${@} + sleep 3 + "$0" start ${@} + ;; +status) + clientPortAddress="localhost" + clientPort=2181 + STAT=`echo srvr | nc $clientPortAddress $clientPort 2> /dev/null | grep Mode` + if [ "x$STAT" = "x" ] + then + echo "Error contacting service. It is probably not running." + exit 1 + else + echo $STAT + exit 0 + fi + ;; +*) + echo "Usage: $0 {start|start-foreground|stop|restart|status|print-cmd}" >&2 + +esac +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/clickhouse-keeper-service.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/clickhouse-keeper-service.md new file mode 100644 index 0000000000..aec2656f37 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/clickhouse-keeper-service.md @@ -0,0 +1,242 @@ +--- +title: "clickhouse-keeper-service" +linkTitle: "clickhouse-keeper-service" +weight: 100 +description: >- + clickhouse-keeper-service +--- + +## clickhouse-keeper-service + +### installation + +Need to install `clickhouse-common-static` + `clickhouse-keeper` OR `clickhouse-common-static` + `clickhouse-server`. +Both OK, use the first if you don't need ClickHouse® server locally. 
+ +```bash +dpkg -i clickhouse-common-static_{%version}.deb clickhouse-keeper_{%version}.deb +``` + +```bash +dpkg -i clickhouse-common-static_{%version}.deb clickhouse-server_{%version}.deb clickhouse-client_{%version}.deb +``` + +Create directories + +```bash +mkdir -p /etc/clickhouse-keeper/config.d +mkdir -p /var/log/clickhouse-keeper +mkdir -p /var/lib/clickhouse-keeper/coordination/log +mkdir -p /var/lib/clickhouse-keeper/coordination/snapshots +mkdir -p /var/lib/clickhouse-keeper/cores + +chown -R clickhouse.clickhouse /etc/clickhouse-keeper /var/log/clickhouse-keeper /var/lib/clickhouse-keeper +``` + +### config + +``` +cat /etc/clickhouse-keeper/config.xml + + + + + + trace + /var/log/clickhouse-keeper/clickhouse-keeper.log + /var/log/clickhouse-keeper/clickhouse-keeper.err.log + + 1000M + 10 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + /var/lib/clickhouse-keeper/ + /var/lib/clickhouse-keeper/cores + + + 2181 + 1 + /var/lib/clickhouse-keeper/coordination/log + /var/lib/clickhouse-keeper/coordination/snapshots + + + 10000 + 30000 + trace + 10000 + + + + + 1 + localhost + 9444 + + + + +``` + +``` +cat /etc/clickhouse-keeper/config.d/keeper.xml + + + :: + + 2181 + 1 + + + 1 + keeper-host-1 + 9444 + + + 2 + keeper-host-2 + 9444 + + + 3 + keeper-host-3 + 9444 + + + + +``` + +### systemd service + +``` +cat /lib/systemd/system/clickhouse-keeper.service +[Unit] +Description=ClickHouse Keeper (analytic DBMS for big data) +Requires=network-online.target +# NOTE: that After/Wants=time-sync.target is not enough, you need to ensure +# that the time was adjusted already, if you use systemd-timesyncd you are +# safe, but if you use ntp or some other daemon, you should configure it +# additionaly. 
+After=time-sync.target network-online.target +Wants=time-sync.target + +[Service] +Type=simple +User=clickhouse +Group=clickhouse +Restart=always +RestartSec=30 +RuntimeDirectory=clickhouse-keeper +ExecStart=/usr/bin/clickhouse-keeper --config=/etc/clickhouse-keeper/config.xml --pid-file=/run/clickhouse-keeper/clickhouse-keeper.pid +# Minus means that this file is optional. +EnvironmentFile=-/etc/default/clickhouse +LimitCORE=infinity +LimitNOFILE=500000 +CapabilityBoundingSet=CAP_NET_ADMIN CAP_IPC_LOCK CAP_SYS_NICE CAP_NET_BIND_SERVICE + +[Install] +# ClickHouse should not start from the rescue shell (rescue.target). +WantedBy=multi-user.target +``` + +``` +systemctl daemon-reload + +systemctl status clickhouse-keeper + +systemctl start clickhouse-keeper +``` + +### debug start without service (as foreground application) + +``` +sudo -u clickhouse /usr/bin/clickhouse-keeper --config=/etc/clickhouse-keeper/config.xml +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/clickhouse-keeper.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/clickhouse-keeper.md index 7d1cca4346..c35a6037c2 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/clickhouse-keeper.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/clickhouse-keeper.md @@ -1,29 +1,63 @@ --- -title: "clickhouse-keeper" -linkTitle: "clickhouse-keeper" +title: "Using clickhouse-keeper" +linkTitle: "Using clickhouse-keeper" description: > - clickhouse-keeper + Moving to the ClickHouse® alternative to Zookeeper +keywords: + - clickhouse keeper + - clickhouse-keeper --- -In 21.3 there is already an option to run own clickhouse zookeeper implementation. It's still experimental, and still need to be started additionally on few nodes (similar to 'normal' zookeeper) and speaks normal zookeeper protocol - needed to simplify A/B tests with real zookeeper. -No docs, for now, only PR with code & tests. 
Of course, if you want to play with it - you can, and early feedback is very valuable. But be prepared for a lot of tiny issues here and there, so don't be disappointed if it will not satisfy your expectations for some reason. It's very-very fresh :slightly_smiling_face: It's ready for some trial runs, but not ready yet for production use cases. +Since 2021 the development of built-in ClickHouse® alternative for Zookeeper is happening, whose goal is to address several design pitfalls, and get rid of extra dependency. + +See slides: https://presentations.clickhouse.com/meetup54/keeper.pdf and video https://youtu.be/IfgtdU1Mrm0?t=2682 + +## Current status (last updated: July 2023) + +Since version 23.3 we recommend using clickhouse-keeper for new installations. + +Even better if you will use the latest version of clickhouse-keeper (currently it's 23.7), and it's not necessary to use the same version of clickhouse-keeper as ClickHouse itself. + +For existing systems that currently use Apache Zookeeper, you can consider upgrading to clickhouse-keeper especially if you will [upgrade ClickHouse](https://altinity.com/clickhouse-upgrade-overview/) also. + +But please remember that on very loaded systems the change can give no performance benefits or can sometimes lead to a worse performance. + +The development pace of keeper code is [still high](https://github.com/ClickHouse/ClickHouse/pulls?q=is%3Apr+keeper) +so every new version should bring improvements / cover the issues, and stability/maturity grows from version to version, so +if you want to play with clickhouse-keeper in some environment - please use [the most recent ClickHouse releases](https://altinity.com/altinity-stable/)! And of course: share your feedback :) + +## How does clickhouse-keeper work? 
+ +Official docs: https://clickhouse.com/docs/en/guides/sre/keeper/clickhouse-keeper/ + +ClickHouse-keeper still need to be started additionally on few nodes (similar to 'normal' zookeeper) and speaks normal zookeeper protocol - needed to simplify A/B tests with real zookeeper. To test that you need to run 3 instances of clickhouse-server (which will mimic zookeeper) with an extra config like that: -[https://github.com/ClickHouse/ClickHouse/blob/c8b1004ecb4bfc4aa581dbcbbbe3a4c72ce57123/tests/integration/test_keeper_multinode_simple/configs/enable_keeper1.xml](https://github.com/ClickHouse/ClickHouse/blob/c8b1004ecb4bfc4aa581dbcbbbe3a4c72ce57123/tests/integration/test_keeper_multinode_simple/configs/enable_keeper1.xml) +[https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_keeper_multinode_simple/configs/enable_keeper1.xml](https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_keeper_multinode_simple/configs/enable_keeper1.xml) -[https://github.com/ClickHouse/ClickHouse/blob/c8b1004ecb4bfc4aa581dbcbbbe3a4c72ce57123/tests/integration/test_keeper_snapshots/configs/enable_keeper.xml](https://github.com/ClickHouse/ClickHouse/blob/c8b1004ecb4bfc4aa581dbcbbbe3a4c72ce57123/tests/integration/test_keeper_snapshots/configs/enable_keeper.xml) +[https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_keeper_snapshots/configs/enable_keeper.xml](https://github.com/ClickHouse/ClickHouse/blob/master/tests/integration/test_keeper_snapshots/configs/enable_keeper.xml) or event single instance with config like that: [https://github.com/ClickHouse/ClickHouse/blob/master/tests/config/config.d/keeper_port.xml](https://github.com/ClickHouse/ClickHouse/blob/master/tests/config/config.d/keeper_port.xml) [https://github.com/ClickHouse/ClickHouse/blob/master/tests/config/config.d/zookeeper.xml](https://github.com/ClickHouse/ClickHouse/blob/master/tests/config/config.d/zookeeper.xml) -And point all the clickhouses (zookeeper 
config secton) to those nodes / ports. +And point all the ClickHouses (zookeeper config section) to those nodes / ports. + +Latest version is recommended (even testing / master builds). We will be thankful for any feedback. + +## systemd service file + +See +https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/clickhouse-keeper-service/ + +## init.d script -Latest testing version is recommended. We will be thankful for any feedback. +See +https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/clickhouse-keeper-initd/ -## Example of a simple cluster with 2 nodes of Clickhouse using built-in keeper +## Example of a simple cluster with 2 nodes of ClickHouse using built-in keeper -For example you can start two Clikhouse nodes (hostname1, hostname2) +For example you can start two ClickHouse nodes (hostname1, hostname2) ### hostname1 @@ -170,7 +204,7 @@ $ cat /etc/clickhouse-server/config.d/clusters.xml Then create a table ```sql -create table test on '{cluster}' ( A Int64, S String) +create table test on cluster '{cluster}' ( A Int64, S String) Engine = ReplicatedMergeTree('/clickhouse/{cluster}/tables/{database}/{table}','{replica}') Order by A; diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/install_ubuntu.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/install_ubuntu.md index 15a6772cc1..abf0e1d579 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/install_ubuntu.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/install_ubuntu.md @@ -1,9 +1,9 @@ --- -title: "Install standalone Zookeeper for ClickHouse on Ubuntu / Debian" +title: "Install standalone Zookeeper for ClickHouse® on Ubuntu / Debian" linkTitle: "Zookeeper install on Ubuntu" weight: 100 description: >- - Install standalone Zookeeper for ClickHouse on Ubuntu / Debian. + Install standalone Zookeeper for ClickHouse® on Ubuntu / Debian. 
--- ## Reference script to install standalone Zookeeper for Ubuntu / Debian diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/jvm-sizes-and-garbage-collector-settings.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/jvm-sizes-and-garbage-collector-settings.md index 2789eea921..225f505c1a 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/jvm-sizes-and-garbage-collector-settings.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/jvm-sizes-and-garbage-collector-settings.md @@ -36,15 +36,15 @@ Set the Java heap size smaller than available RAM size on the node. This is very 1. `MaxGCPauseMillis=50` (by default 200) - the 'target' acceptable pause for garbage collection (milliseconds) -1. `jute.maxbuffer` limits the maximum size of znode content. By default it's 1Mb. In some usecases (lot of partitions in table) ClickHouse may need to create bigger znodes. +1. `jute.maxbuffer` limits the maximum size of znode content. By default it's 1Mb. In some use cases (a lot of partitions in a table) ClickHouse® may need to create bigger znodes. 1. 
(optional) enable GC logs: `-Xloggc:/path_to/gc.log` -## Zookeeper configurarion used by Yandex Metrika (from 2017) +## Zookeeper configuration used by Yandex Metrika (from 2017) -The configuration used by Yandex ( [https://clickhouse.tech/docs/en/operations/tips/\#zookeeper](https://clickhouse.tech/docs/en/operations/tips/#zookeeper) ) - they use older JVM version (with `UseParNewGC` garbage collector), and tune GC logs heavily: +The configuration used by Yandex ( [https://clickhouse.com/docs/en/operations/tips#zookeeper](https://clickhouse.com/docs/en/operations/tips#zookeeper) ) - they use older JVM version (with `UseParNewGC` garbage collector), and tune GC logs heavily: ```bash JAVA_OPTS="-Xms{{ cluster.get('xms','128M') }} \ @@ -74,7 +74,6 @@ JAVA_OPTS="-Xms{{ cluster.get('xms','128M') }} \ * [https://docs.oracle.com/cd/E40972_01/doc.70/e40973/cnf_jvmgc.htm\#autoId2](https://docs.oracle.com/cd/E40972_01/doc.70/e40973/cnf_jvmgc.htm#autoId2) * [https://docs.cloudera.com/runtime/7.2.7/kafka-performance-tuning/topics/kafka-tune-broker-tuning-jvm.html](https://docs.cloudera.com/runtime/7.2.7/kafka-performance-tuning/topics/kafka-tune-broker-tuning-jvm.html) * [https://docs.cloudera.com/documentation/enterprise/6/6.3/topics/cm-tune-g1gc.html](https://docs.cloudera.com/documentation/enterprise/6/6.3/topics/cm-tune-g1gc.html) -* [https://blog.sokolenko.me/2014/11/javavm-options-production.html](https://blog.sokolenko.me/2014/11/javavm-options-production.html) * [https://www.maknesium.de/21-most-important-java-8-vm-options-for-servers](https://www.maknesium.de/21-most-important-java-8-vm-options-for-servers) * [https://docs.oracle.com/javase/10/gctuning/introduction-garbage-collection-tuning.htm\#JSGCT-GUID-326EB4CF-8C8C-4267-8355-21AB04F0D304](https://docs.oracle.com/javase/10/gctuning/introduction-garbage-collection-tuning.htm#JSGCT-GUID-326EB4CF-8C8C-4267-8355-21AB04F0D304) * [https://github.com/chewiebug/GCViewer](https://github.com/chewiebug/GCViewer) diff --git 
a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/zookeeper-cluster-migration-k8s-node-storage.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/zookeeper-cluster-migration-k8s-node-storage.md new file mode 100644 index 0000000000..eb88c07037 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/zookeeper-cluster-migration-k8s-node-storage.md @@ -0,0 +1,43 @@ +--- +title: "ZooKeeper cluster migration when using K8s node local storage" +linkTitle: "ZooKeeper cluster migration when using K8s node local storage" +description: > + ZooKeeper cluster migration when using K8s node local storage +--- + +Describes how to migrate a ZooKeeper cluster when using K8s node-local storage such as static PV, `local-path`, `TopoLVM`. + +Requires HA setup (3+ pods). + +This solution is more risky than [migration by adding followers]({{< ref "altinity-kb-zookeeper-cluster-migration" >}}) because it reduces +the number of active consensus members but is operationally simpler. When running with `clickhouse-keeper`, it can be +performed gracefully so that quorum is maintained during the whole operation. + + +1. Find the leader pod and note its name + 1. To detect leader run `echo stat | nc 127.0.0.1 2181 | grep leader` inside pods +1. Make sure the ZK cluster is healthy and all nodes are in sync + 1. (run on leader) `echo mntr | nc 127.0.0.1 2181 | grep zk_synced_followers` should be N-1 for N member cluster +1. Pick the first **non-leader** pod and delete its `PVC`, + 1. `kubectl delete --wait=false pvc clickhouse-keeper-data-0` -> status should be `Terminating` + 1. Also delete `PV` if your `StorageClass` reclaim policy is set to `Retain` +1. If you are using dynamic volume provisioning make adjustments based on your k8s infrastructure (such as moving labels and taints or cordoning node) so that after pod delete the new one will be scheduled on the planned node + 1. 
`kubectl label node planned-node dedicated=zookeeper` + 1. `kubectl label node this-pod-node dedicated-` + 1. `kubectl taint node planned-node dedicated=zookeeper:NoSchedule` + 1. `kubectl taint node this-pod-node dedicated=zookeeper:NoSchedule-` +1. For manual volume provisioning wait till a new `PVC` is created and then provision volume on the planned node +1. Delete the first non-leader pod and wait for its PV to be deleted + 1. `kubectl delete pod clickhouse-keeper-0` + 1. `kubectl wait --for=delete pv/pvc-0a823311-616f-4b7e-9b96-0c059c62ab3b --timeout=120s` +1. Wait for the new pod to be scheduled and volume provisioned (or provision manual volume per instructions above) +1. Ensure new member joined and synced + 1. (run on leader) `echo mntr | nc 127.0.0.1 2181 | grep zk_synced_followers` should be N-1 for N member cluster +1. Repeat for all other non-leader pods +1. (ClickHouse® Keeper only), for Zookeeper you will need to force an election by stopping the leader + 1. Ask the current leader to yield leadership + 2. `echo ydld | nc 127.0.0.1 2181` -> should print something like `Sent yield leadership request to ...` + 3. * Make sure a different leader was elected by finding your new leader +1. 
Finally repeat for the leader pod + + diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/zookeeper-monitoring.md b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/zookeeper-monitoring.md index 7b2b5f0da3..fe32c6cc52 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/zookeeper-monitoring.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/zookeeper-monitoring.md @@ -5,8 +5,6 @@ description: > ZooKeeper Monitoring --- -# ZooKeeper Monitoring - ## ZooKeeper ### scrape metrics @@ -29,10 +27,9 @@ See also [https://grafana.com/grafana/dashboards?search=ZooKeeper&dataSource=pro ### See also -* [https://blog.serverdensity.com/how-to-monitor-zookeeper/](https://blog.serverdensity.com/how-to-monitor-zookeeper/) * [https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/\#zookeeper-metrics](https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/#zookeeper-metrics) -* [https://dzone.com/articles/monitoring-apache-zookeeper-servers](https://dzone.com/articles/monitoring-apache-zookeeper-servers) -* [https://docs.signalfx.com/en/latest/integrations/integrations-reference/integrations.zookeeper.html](https://docs.signalfx.com/en/latest/integrations/integrations-reference/integrations.zookeeper.html) +* [https://dzone.com/articles/monitoring-apache-zookeeper-servers](https://dzone.com/articles/monitoring-apache-zookeeper-servers) - note exhibitor is no longer maintained * [https://github.com/samber/awesome-prometheus-alerts/blob/c3ba0cf1997c7e952369a090aeb10343cdca4878/\_data/rules.yml\#L1146-L1170](https://github.com/samber/awesome-prometheus-alerts/blob/c3ba0cf1997c7e952369a090aeb10343cdca4878/_data/rules.yml#L1146-L1170) \(or [https://awesome-prometheus-alerts.grep.to/rules.html\#zookeeper](https://awesome-prometheus-alerts.grep.to/rules.html#zookeeper) \) * 
[https://alex.dzyoba.com/blog/prometheus-alerts/](https://alex.dzyoba.com/blog/prometheus-alerts/) * [https://docs.datadoghq.com/integrations/zk/?tab=host](https://docs.datadoghq.com/integrations/zk/?tab=host) +* [https://statuslist.app/uptime-monitoring/zookeeper/](https://statuslist.app/uptime-monitoring/zookeeper/) diff --git a/content/en/altinity-kb-setup-and-maintenance/altinity-packaging-compatibility-greater-than-21.x-and-earlier.md b/content/en/altinity-kb-setup-and-maintenance/altinity-packaging-compatibility-greater-than-21.x-and-earlier.md index a0573c2f3b..850b5367cb 100644 --- a/content/en/altinity-kb-setup-and-maintenance/altinity-packaging-compatibility-greater-than-21.x-and-earlier.md +++ b/content/en/altinity-kb-setup-and-maintenance/altinity-packaging-compatibility-greater-than-21.x-and-earlier.md @@ -6,11 +6,11 @@ description: > --- ## Working with Altinity & Yandex packaging together -Since version 21.1 Altinity switches to the same packaging as used by Yandex. That is needed for syncing things and introduces several improvements (like adding systemd service file). +Since ClickHouse® version 21.1 Altinity switches to the same packaging as used by Yandex. That is needed for syncing things and introduces several improvements (like adding systemd service file). Unfortunately, that change leads to compatibility issues - automatic dependencies resolution gets confused by the conflicting package names: both when you update ClickHouse to the new version (the one which uses older packaging) and when you want to install older altinity packages (20.8 and older). 
-### Installing old clickhouse version (with old packaging schema) +### Installing old ClickHouse version (with old packaging schema) When you try to install versions 20.8 or older from Altinity repo - @@ -19,7 +19,7 @@ version=20.8.12.2-1.el7 yum install clickhouse-client-${version} clickhouse-server-${version} ``` -yum outputs smth like +yum outputs something like ```bash yum install clickhouse-client-${version} clickhouse-server-${version} diff --git a/content/en/altinity-kb-setup-and-maintenance/asynchronous_metrics_descr.md b/content/en/altinity-kb-setup-and-maintenance/asynchronous_metrics_descr.md index 0c997b7e4b..f3d9cfd1c2 100644 --- a/content/en/altinity-kb-setup-and-maintenance/asynchronous_metrics_descr.md +++ b/content/en/altinity-kb-setup-and-maintenance/asynchronous_metrics_descr.md @@ -13,7 +13,7 @@ jemalloc -- parameters of jemalloc allocator, they are not very useful, and not MarkCacheBytes / MarkCacheFiles -- there are cache for .mrk files (default size is 5GB), you can see is it use all 5GB or not -MemoryCode -- how much memory allocated for ClickHouse executable +MemoryCode -- how much memory allocated for ClickHouse® executable MemoryDataAndStack -- virtual memory allocated for data and stack diff --git a/content/en/altinity-kb-setup-and-maintenance/aws-ebs.md b/content/en/altinity-kb-setup-and-maintenance/aws-ebs.md deleted file mode 100644 index 91ad0233fe..0000000000 --- a/content/en/altinity-kb-setup-and-maintenance/aws-ebs.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: "AWS EBS" -linkTitle: "AWS EBS" -description: > - AWS EBS ---- - - - - - - - - - - - - - - - - - - - - -
Volume type - gp3gp2
Max throughput per volume - 1000 MiB/s250 MiB/s
Price - -

$0.08/GB-month

-

3,000 IOPS free and

-

$0.005/provisioned IOPS-month over 3,000;

-

125 MB/s free and

-

$0.04/provisioned MB/s-month over 125

-
$0.10/GB-month
- -### GP2 - -In usual conditions ClickHouse being limited by throughput of volumes and amount of provided IOPS doesn't make any big difference for performance starting from a certain number. So the most native choice for clickhouse is gp2 and gp3 volumes. - -‌Because gp2 volumes have a hard limit of 250 MiB/s per volume (for volumes bigger than 334 GB), it usually makes sense to split one big volume in multiple smaller volumes larger than 334GB in order to have maximum possible throughput. - -‌EC2 instances also have an EBS throughput limit, it depends on the size of the EC2 instance. That means if you would attach multiple volumes which would have high potential throughput, you would be limited by your EC2 instance, so usually there is no reason to have more than 4-5 volumes per node. - -It's pretty straightforward to set up a ClickHouse for using multiple EBS volumes with storage_policies. - -### GP3 - -It's a new type of volume, which is 20% cheaper than gp2 per GB-month and has lower free throughput: only 125 MB/s vs 250 MB/s. But you can buy additional throughput for volume and gp3 pricing became comparable with multiple gp2 volumes starting from 1000-1500GB size. It also works better if most of your queries read only one or several parts, because in that case you are not being limited by performance of a single ebs disk, as parts can be located only on one disk at once. 
- -For best performance, it's suggested to buy: -* 7000 IOPS -* Throughput up to the limit of your EC2 instance - - -[https://altinity.com/blog/2019/11/27/amplifying-clickhouse-capacity-with-multi-volume-storage-part-1](https://altinity.com/blog/2019/11/27/amplifying-clickhouse-capacity-with-multi-volume-storage-part-1) - -[https://altinity.com/blog/2019/11/29/amplifying-clickhouse-capacity-with-multi-volume-storage-part-2](https://altinity.com/blog/2019/11/29/amplifying-clickhouse-capacity-with-multi-volume-storage-part-2) - -[https://calculator.aws/\#/createCalculator/EBS?nc2=h_ql_pr_calc](https://calculator.aws/\#/createCalculator/EBS?nc2=h_ql_pr_calc) - -[https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) - -[https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) - -[https://aws.amazon.com/ebs/general-purpose/](https://aws.amazon.com/ebs/general-purpose/) diff --git a/content/en/altinity-kb-setup-and-maintenance/aws-ec2-storage.md b/content/en/altinity-kb-setup-and-maintenance/aws-ec2-storage.md new file mode 100644 index 0000000000..36b0d61c70 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/aws-ec2-storage.md @@ -0,0 +1,128 @@ +--- +title: "AWS EC2 Storage" +linkTitle: "AWS EC2 Storage" +description: > + AWS EBS, EFS, FSx, Lustre +aliases: + - "/altinity-kb-setup-and-maintenance/aws-ebs/" +--- + +# EBS + +Most native choose for ClickHouse® as fast storage, because it usually guarantees best throughput, IOPS, latency for reasonable price. 
+ +[https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) + +[https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) + + +## General Purpose SSD volumes + +In usual conditions ClickHouse being limited by throughput of volumes and amount of provided IOPS doesn't make any big difference for performance starting from a certain number. So the most native choice for ClickHouse is gp3 and gp2 volumes. + +‌EC2 instances also have an EBS throughput limit, it depends on the size of the EC2 instance. That means if you would attach multiple volumes which would have high potential throughput, you would be limited by your EC2 instance, so usually there is no reason to have more than 1-3 GP3 volume or 4-5 GP2 volume per node. + +It's pretty straightforward to set up a ClickHouse for using multiple EBS volumes with jbod storage_policies. + +[general purpose](https://aws.amazon.com/ebs/general-purpose/) + + + + + + + + + + + + + + + + + + + + + +
Volume type + gp3gp2
Max throughput per volume + 1000 MiB/s250 MiB/s
Price + +

$0.08/GB-month

+

3,000 IOPS free and

+

$0.005/provisioned IOPS-month over 3,000;

+

125 MB/s free and

+

$0.04/provisioned MB/s-month over 125

+
$0.10/GB-month
+ + +### GP3 + +It's **recommended option**, as it allow you to have only one volume, for instances which have less than 10 Gbps EBS Bandwidth (nodes =<32 VCPU usually) and still have maximum performance. +For bigger instances, it make sense to look into option of having several GP3 volumes. + +It's a new type of volume, which is 20% cheaper than gp2 per GB-month and has lower free throughput: only 125 MiB/s vs 250 MiB/s. But you can buy additional throughput and IOPS for volume. It also works better if most of your queries read only one or several parts, because in that case you are not being limited by performance of a single EBS disk, as parts can be located only on one disk at once. + +Because, you need to have less GP3 volumes compared to GP2 option, it's suggested approach for now. + +For best performance, it's suggested to buy: +* 7000 IOPS +* Throughput up to the limit of your EC2 instance (1000 MiB/s is safe option) + + +### GP2 + +‌GP2 volumes have a hard limit of 250 MiB/s per volume (for volumes bigger than 334 GB), it usually makes sense to split one big volume in multiple smaller volumes larger than 334GB in order to have maximum possible throughput. + +## Throughput Optimized HDD volumes + +### ST1 + +Looks like a good candidate for cheap cold storage for old data with decent maximum throughput 500 MiB/s. But it achieved only for big volumes >5 TiB. + +[Throughput credits and burst performance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hdd-vols.html#EBSVolumeTypes_st1) + +## Provisioned IOPS SSD volumes + +### IO2 Block Express, IO2, IO1 + +In 99.99% cases doesn't give any benefit for ClickHouse compared to GP3 option and perform worse because maximum throughput is limited to 500 MiB/s per volume if you buy less than 32 000 IOPS, which is really expensive (compared to other options) and unneeded for ClickHouse. And if you have spare money, it's better to spend them on better EC2 instance. 
+ +# S3 + +Best option for cold data, it can give considerably good throughput and really good price, but latencies and IOPS much worse than EBS option. +Another interesting point is, for EC2 instance throughput limit for EBS and S3 calculated separately, so if you access your data both from EBS and S3, you can get double throughput. + +It's stated in AWS documentation, that S3 can fully utilize network capacity of EC2 instance. (up to 100 Gb/s) +Latencies (first-byte-out) are estimated to be 100-200 milliseconds within a single region. + +It is also recommended to enable [gateway endpoint for s3](https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints-s3.html#create-gateway-endpoint-s3), it can push throughput even further (up to 800 Gb/s) + +[S3 best practices](https://docs.aws.amazon.com/AmazonS3/latest/userguide/optimizing-performance.html) + +# EFS + +Works over NFSv4.1 version. +We have clients, which run their ClickHouse installations over NFS. It works considerably well as cold storage, so it's recommended to have EBS disks for hot data. A fast network is required. + +ClickHouse doesn't have any native option to reuse the same data on durable network disk via several replicas. You either need to store the same data twice or build custom tooling around ClickHouse and use it without Replicated*MergeTree tables. + +# FSx + +## Lustre + +We have several clients, who use Lustre (some of them use AWS FSx Lustre, another is self managed Lustre) without any big issue. Fast network is required. +There were known problems with data damage on older versions caused by issues with O_DIRECT or [async IO](https://lustre-discuss.lustre.narkive.com/zwcvyEEY/asynchronous-posix-i-o-with-lustre) support on Lustre. + +ClickHouse doesn't have any native option to reuse the same data on durable network disk via several replicas. You either need to store the same data twice or build custom tooling around ClickHouse and use it without Replicated*MergeTree tables.
+ +[https://altinity.com/blog/2019/11/27/amplifying-clickhouse-capacity-with-multi-volume-storage-part-1](https://altinity.com/blog/2019/11/27/amplifying-clickhouse-capacity-with-multi-volume-storage-part-1) + +[https://altinity.com/blog/2019/11/29/amplifying-clickhouse-capacity-with-multi-volume-storage-part-2](https://altinity.com/blog/2019/11/29/amplifying-clickhouse-capacity-with-multi-volume-storage-part-2) + +[https://calculator.aws/\#/createCalculator/EBS?nc2=h_ql_pr_calc](https://calculator.aws/\#/createCalculator/EBS?nc2=h_ql_pr_calc) + + + diff --git a/content/en/altinity-kb-setup-and-maintenance/cgroups_k8s.md b/content/en/altinity-kb-setup-and-maintenance/cgroups_k8s.md index c09f460402..83433582b3 100644 --- a/content/en/altinity-kb-setup-and-maintenance/cgroups_k8s.md +++ b/content/en/altinity-kb-setup-and-maintenance/cgroups_k8s.md @@ -6,9 +6,7 @@ description: >- cgroups and kubernetes cloud providers. --- -## cgroups and kubernetes cloud providers - -Why my ClickHouse is slow after upgrade to version 22.2 and higher? +Why my ClickHouse® is slow after upgrade to version 22.2 and higher? The probable reason is that ClickHouse 22.2 started to respect cgroups (Respect cgroups limits in max_threads autodetection. [#33342](https://github.com/ClickHouse/ClickHouse/pull/33342) ([JaySon](https://github.com/JaySon-Huang)). 
diff --git a/content/en/altinity-kb-setup-and-maintenance/ch-logs-2-json-vectordev.md b/content/en/altinity-kb-setup-and-maintenance/ch-logs-2-json-vectordev.md index ada0ae7181..a95ef981aa 100644 --- a/content/en/altinity-kb-setup-and-maintenance/ch-logs-2-json-vectordev.md +++ b/content/en/altinity-kb-setup-and-maintenance/ch-logs-2-json-vectordev.md @@ -1,12 +1,33 @@ --- -title: "Transformation Clickhouse logs to ndjson using Vector.dev" -linkTitle: "Clickhouse logs and Vector.dev" +title: "Transforming ClickHouse logs to ndjson using Vector.dev" +linkTitle: "ClickHouse logs and Vector.dev" weight: 100 description: >- - Transformation Clickhouse logs to ndjson using Vector.dev + Transforming ClickHouse logs to ndjson using Vector.dev --- -## Transformation Clickhouse logs to ndjson using Vector.dev" +### ClickHouse 22.8 + +Starting from 22.8 version, ClickHouse support writing logs in JSON format: + +``` + + + + + json + + +``` + + +## Transforming ClickHouse logs to ndjson using Vector.dev" ### Installation of vector.dev @@ -72,7 +93,7 @@ tail /var/log/clickhouse-server-json/clickhouse-server.2022-04-21.ndjson ### sink logs into ClickHouse table -Be carefull with logging ClickHouse messages into the same ClickHouse instance, it will cause endless recursive self-logging. +Be careful with logging ClickHouse messages into the same ClickHouse instance, it will cause endless recursive self-logging. 
```sql create table default.clickhouse_logs( diff --git a/content/en/altinity-kb-setup-and-maintenance/change-me.md b/content/en/altinity-kb-setup-and-maintenance/change-me.md new file mode 100644 index 0000000000..2c53dbabe3 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/change-me.md @@ -0,0 +1,40 @@ +--- +title: "Replication: Can not resolve host of another ClickHouse® server" +linkTitle: "Replication: Can not resolve host of another ClickHouse® server" +weight: 100 +description: >- +--- + +### Symptom + +When configuring Replication the ClickHouse® cluster nodes are experiencing communication issues, and an error message appears in the log that states that the ClickHouse host cannot be resolved. + +``` + DNSResolver: Cannot resolve host (xxxxx), error 0: DNS error. + auto DB::StorageReplicatedMergeTree::processQueueEntry(ReplicatedMergeTreeQueue::SelectedEntryPtr)::(anonymous class)::operator()(DB::StorageReplicatedMergeTree::LogEntryPtr &) const: Code: 198. DB::Exception: Not found address of host: xxxx. (DNS_ERROR), +``` + +### Cause: + +The error message indicates that the host name of the one of the nodes of the cluster cannot be resolved by other cluster members, causing communication issues between the nodes. + +Each node in the replication setup pushes its Fully Qualified Domain Name (FQDN) to Zookeeper, and if other nodes cannot access it using its FQDN, this can cause issues. + +### Action: + +There are two possible solutions to this problem: + +1. Change the FQDN to allow other nodes to access it. This solution can also help to keep the environment more organized. To do this, use the following command to edit the hostname file: + +```sh +sudo vim /etc/hostname +``` + +Or use the following command to change the hostname: + +```sh +sudo hostnamectl set-hostname ... +``` + +2. Use the configuration parameter `` to specify the IP address or hostname that the nodes can use to communicate with each other. 
This solution can have some issues, such as the one described in this link: https://github.com/ClickHouse/ClickHouse/issues/2154. + To configure this parameter, refer to the documentation for more information: https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#interserver-http-host. diff --git a/content/en/altinity-kb-setup-and-maintenance/clickhouse-backup-diff.md b/content/en/altinity-kb-setup-and-maintenance/clickhouse-backup-diff.md index 7248dfe2fe..4cb5999eb2 100644 --- a/content/en/altinity-kb-setup-and-maintenance/clickhouse-backup-diff.md +++ b/content/en/altinity-kb-setup-and-maintenance/clickhouse-backup-diff.md @@ -6,20 +6,20 @@ description: > --- ### differential backups using clickhouse-backup -1. Download the latest clickhouse-backup for your platform https://github.com/AlexAkulov/clickhouse-backup/releases +1. Download the latest version of Altinity Backup for ClickHouse®: https://github.com/Altinity/clickhouse-backup/releases ```bash # ubuntu / debian -wget https://github.com/AlexAkulov/clickhouse-backup/releases/download/v1.0.0/clickhouse-backup_1.0.0_amd64.deb -sudo dpkg -i clickhouse-backup_1.0.0_amd64.deb +wget https://github.com/Altinity/clickhouse-backup/releases/download/v2.5.20/clickhouse-backup_2.5.20_amd64.deb +sudo dpkg -i clickhouse-backup_2.5.20_amd64.deb # centos / redhat / fedora -sudo yum install https://github.com/AlexAkulov/clickhouse-backup/releases/download/v1.0.0/clickhouse-backup-1.0.0-1.x86_64.rpm +sudo yum install https://github.com/Altinity/clickhouse-backup/releases/download/v2.5.20/clickhouse-backup-2.5.20-1.x86_64.rpm # other platforms -wget https://github.com/AlexAkulov/clickhouse-backup/releases/download/v1.0.0/clickhouse-backup.tar.gz +wget https://github.com/Altinity/clickhouse-backup/releases/download/v2.5.20/clickhouse-backup.tar.gz sudo mkdir /etc/clickhouse-backup/ sudo mv clickhouse-backup/config.yml /etc/clickhouse-backup/config.yml.example sudo mv 
clickhouse-backup/clickhouse-backup /usr/bin/ @@ -55,7 +55,7 @@ END chmod +x /opt/clickhouse-backup-diff/clickhouse-backup-cron.sh ``` -3. Create confuguration for clickhouse-backup +3. Create configuration for clickhouse-backup ``` # Check the example: /etc/clickhouse-backup/config.yml.example diff --git a/content/en/altinity-kb-setup-and-maintenance/clickhouse-backup.md b/content/en/altinity-kb-setup-and-maintenance/clickhouse-backup.md index 33d8024d3f..4787539a83 100644 --- a/content/en/altinity-kb-setup-and-maintenance/clickhouse-backup.md +++ b/content/en/altinity-kb-setup-and-maintenance/clickhouse-backup.md @@ -1,12 +1,12 @@ --- -title: "clickhouse-backup" -linkTitle: "clickhouse-backup" +title: "Altinity Backup for ClickHouse®" +linkTitle: "Altinity Backup for ClickHouse®" description: > - clickhouse-backup + backblaze + Altinity Backup for ClickHouse® + backblaze --- ### Installation and configuration -Download the latest `clickhouse-backup.tar.gz` from assets from [https://github.com/AlexAkulov/clickhouse-backup/releases](https://github.com/AlexAkulov/clickhouse-backup/releases) +Download the latest `clickhouse-backup.tar.gz` from assets from [https://github.com/Altinity/clickhouse-backup/releases](https://github.com/Altinity/clickhouse-backup/releases) This tar.gz contains a single binary of `clickhouse-backup` and an example of config file. @@ -14,17 +14,17 @@ Backblaze has s3 compatible API but requires empty acl parameter `acl: ""`. [https://www.backblaze.com/](https://www.backblaze.com/) has 15 days and free 10Gb S3 trial. 
-```yaml +```bash $ mkdir clickhouse-backup $ cd clickhouse-backup -$ wget https://github.com/AlexAkulov/clickhouse-backup/releases/download/1.0.0-beta2/clickhouse-backup.tar.gz +$ wget https://github.com/Altinity/clickhouse-backup/releases/download/v2.5.20/clickhouse-backup.tar.gz $ tar zxf clickhouse-backup.tar.gz $ rm clickhouse-backup.tar.gz - $ cat config.yml +``` +```yaml general: remote_storage: s3 - max_file_size: 1099511627776 disable_progress_bar: false backups_to_keep_local: 0 backups_to_keep_remote: 0 @@ -48,7 +48,7 @@ s3: access_key: 0****1 secret_key: K****1 bucket: "mybucket" - endpoint: s3.us-west-000.backblazeb2.com + endpoint: https://s3.us-west-000.backblazeb2.com region: us-west-000 acl: "" force_path_style: false @@ -72,7 +72,7 @@ select count() from test.test; └─────────┘ ``` -clickhouse-backup list should work without errors (it scans local and remote (s3) folders): +`clickhouse-backup list` should work without errors (it scans local and remote (s3) folders): ```bash $ sudo ./clickhouse-backup list -c config.yml diff --git a/content/en/altinity-kb-setup-and-maintenance/clickhouse-deployment-plan.md b/content/en/altinity-kb-setup-and-maintenance/clickhouse-deployment-plan.md index 6e456a6db8..065d84da91 100644 --- a/content/en/altinity-kb-setup-and-maintenance/clickhouse-deployment-plan.md +++ b/content/en/altinity-kb-setup-and-maintenance/clickhouse-deployment-plan.md @@ -1,21 +1,21 @@ --- -title: "Successful ClickHouse deployment plan" -linkTitle: "Successful ClickHouse deployment plan" +title: "Successful ClickHouse® deployment plan" +linkTitle: "Successful ClickHouse® deployment plan" weight: 100 description: >- - Successful ClickHouse deployment plan. + Successful ClickHouse® deployment plan --- -## Successful ClickHouse deployment plan +## Successful ClickHouse® deployment plan ### Stage 0. Build POC -1. Install single node clickhouse +1. 
Install single node ClickHouse - https://clickhouse.com/docs/en/getting-started/tutorial/ - https://clickhouse.com/docs/en/getting-started/install/ - https://docs.altinity.com/altinitystablebuilds/stablequickstartguide/ 2. Start with creating a single table (the biggest one), use MergeTree engine. Create 'some' schema (most probably it will be far from optimal). Prefer denormalized approach for all immutable dimensions, for mutable dimensions - consider dictionaries. -3. Load some amount of data (at least 5 Gb, and 10 mln rows) - preferable the real one, or as close to real as possible. Usully the simplest options are either through CSV / TSV files (or `insert into clickhouse_table select * FROM mysql(...) where ...`) +3. Load some amount of data (at least 5 Gb, and 10 mln rows) - preferable the real one, or as close to real as possible. Usually the simplest options are either through CSV / TSV files (or `insert into clickhouse_table select * FROM mysql(...) where ...`) 4. Create several representative queries. 5. Check the columns cardinality, and appropriate types, use minimal needed type 6. Review the partition by and order by. https://kb.altinity.com/engines/mergetree-table-engine-family/pick-keys/ @@ -38,16 +38,16 @@ description: >- - https://kb.altinity.com/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/network-configuration/ 9. If you need sharding - consider different sharding approaches. -### Stage 2. Preprod setup & developement +### Stage 2. Preprod setup & development -1. Install clickhouse in cluster - several nodes / VMs + zookeeper +1. 
Install ClickHouse in cluster - several nodes / VMs + zookeeper - https://kb.altinity.com/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/cluster-configuration-process/ - https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/altinity-kb-proper-setup/ - https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/install_ubuntu/ 2. Create good config & automate config / os / restarts (ansible / puppet etc) - https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-settings-to-adjust/ - for docker: https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-clickhouse-in-docker/ - - for k8, use clickhouse-operator OR https://kb.altinity.com/altinity-kb-kubernetes/altinity-kb-possible-issues-with-running-clickhouse-in-k8s/ + - for k8s, use the Altinity Kubernetes Operator for ClickHouse OR https://kb.altinity.com/altinity-kb-kubernetes/altinity-kb-possible-issues-with-running-clickhouse-in-k8s/ 3. Set up monitoring / log processing / alerts etc. - https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-monitoring/#build-your-own-monitoring 4. Set up users. @@ -56,7 +56,7 @@ description: >- - https://kb.altinity.com/altinity-kb-setup-and-maintenance/schema-migration-tools/ 6. Design backup / failover strategies: - https://clickhouse.com/docs/en/operations/backup/ - - https://github.com/AlexAkulov/clickhouse-backup + - https://github.com/Altinity/clickhouse-backup 7. Develop pipelines / queries, create test suite, CI/CD 8. Do benchmark / stress tests 9. 
Test configuration changes / server restarts / failovers / version upgrades diff --git a/content/en/altinity-kb-setup-and-maintenance/clickhouse-operator.md b/content/en/altinity-kb-setup-and-maintenance/clickhouse-operator.md index e84894a7bf..9e77b43baa 100644 --- a/content/en/altinity-kb-setup-and-maintenance/clickhouse-operator.md +++ b/content/en/altinity-kb-setup-and-maintenance/clickhouse-operator.md @@ -1,11 +1,11 @@ --- -title: "ClickHouse operator" +title: "Altinity Kubernetes Operator For ClickHouse®" linkTitle: "ClickHouse operator" weight: 100 description: >- - ClickHouse operator + Altinity Kubernetes Operator For ClickHouse® --- -## ClickHouse operator +## Altinity Kubernetes Operator for ClickHouse® Documentation https://github.com/Altinity/clickhouse-operator/blob/master/docs/README.md diff --git a/content/en/altinity-kb-setup-and-maintenance/clickhouse-versions.md b/content/en/altinity-kb-setup-and-maintenance/clickhouse-versions.md index 1d9858fe63..4654857acd 100644 --- a/content/en/altinity-kb-setup-and-maintenance/clickhouse-versions.md +++ b/content/en/altinity-kb-setup-and-maintenance/clickhouse-versions.md @@ -1,10 +1,10 @@ --- -title: "ClickHouse versions" -linkTitle: "ClickHouse versions" +title: "ClickHouse® versions" +linkTitle: "ClickHouse® versions" description: > - ClickHouse versions + ClickHouse® versions --- -## ClickHouse versioning schema +## ClickHouse® versioning schema ![ClickHouse Version Breakdown](/assets/illyustraciya_bez_nazvaniya.png) @@ -44,22 +44,7 @@ See also: [https://clickhouse.tech/docs/en/faq/operations/production/](https://c ## How do I upgrade? -{{% alert title="Warning" color="warning" %}} -Check upgrade / downgrade scenario on staging first. -{{% /alert %}} - -1. check if you need to adjust some settings / to opt-out some new features you don't need (maybe needed to to make the downgrade path possible, or to make it possible for 2 versions to work together). -2. 
[upgrade packages](https://docs.altinity.com/altinitystablerelease/stablequickstartguide/) on odd replicas -3. (if needed / depends on use case) stop ingestion into odd replicas / remove them for load-balancer etc. -4. restart clickhouse-server service on odd replicas. -5. once odd replicas will go back online - repeat the same procedure on the even replicas. - -In some upgrade scenarios (depending from which version to which you do upgrate) when differerent replicas use different clickhouse versions you may see following issues: - -1. the replication don't work at all and delays grow. -2. errors about 'checksum mismatch' and traffic between replicase increase (they need to resync merge results). - -Both problems will go away once all replicas will be upgraded. +Follow this KB article for [ClickHouse version upgrade](https://kb.altinity.com/upgrade/) ## Bugs? @@ -67,11 +52,11 @@ ClickHouse development process goes in a very high pace and has already thousand All core features are well-tested, and very stable, and code is high-quality. But as with any other software bad things may happen. Usually the most of bugs happens in the new, freshly added functionality, and in some complex combination of several features (of course all possible combinations of features just physically can’t be tested). Usually new features are adopted by the community and stabilize quickly. -### What should I do if I found a bug in clickhouse? +### What should I do if I found a bug in ClickHouse? 1. First of all: try to upgrade to the latest bugfix release Example: if you use v21.3.5.42-lts but you know that v21.3.10.1-lts already exists - start with upgrade to that. Upgrades to latest maintenance releases are smooth and safe. 2. Look for similar issues in github. Maybe the fix is on the way. 3. If you can reproduce the bug: try to isolate it - remove some pieces of query one-by-one / simplify the scenario until the issue still reproduces. 
This way you can figure out which part is responsible for that bug, and you can try to create [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) 4. Once you have minimal reproducible example: 1. report it to github (or to Altinity Support) - 2. check if it reproduces on newer clickhouse versions + 2. check if it reproduces on newer ClickHouse versions diff --git a/content/en/altinity-kb-setup-and-maintenance/client-timeouts.md b/content/en/altinity-kb-setup-and-maintenance/client-timeouts.md new file mode 100644 index 0000000000..7d5363da3f --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/client-timeouts.md @@ -0,0 +1,69 @@ +--- +title: "Client Timeouts" +linkTitle: "Client Timeouts" +weight: 100 +description: >- + How to prevent connection errors. +--- + +Timeout settings are related to the client, server, and network. They can be tuned to solve sporadic timeout issues. + +It's important to understand that network devices (routers, NATs, load balancers ) could have their own timeouts. Sometimes, they won't respect TCP keep-alive and close the session due to inactivity. Only application-level keepalives could prevent TCP sessions from closing. + +Below are the settings that will work only if you set them in the default user profile. The problem is that they should be applied before the connection happens. 
And if you send them with a query/connection, it's already too late: +```sql +SETTINGS + receive_timeout = 3600, + send_timeout = 3600, + http_receive_timeout = 3600, + http_send_timeout = 3600, + http_connection_timeout = 2 +``` + +Those can be set on the query level (but in the profile, too): +```sql +SETTINGS + tcp_keep_alive_timeout = 3600, + --!!!send_progress_in_http_headers = 1, + http_headers_progress_interval_ms = 10000, + http_wait_end_of_query = 1, + max_execution_time = 3600 +``` + +https://clickhouse.com/docs/en/integrations/language-clients/javascript#keep-alive-nodejs-only + +`send_progress_in_http_headers`  will not be applied in this way because here we can configure the JDBC driver’s client options only ([this](https://github.com/ClickHouse/clickhouse-java/blob/main/clickhouse-client/src/main/java/com/clickhouse/client/config/ClickHouseClientOption.java)), but there is an option called `custom_settings`  ([this](https://github.com/ClickHouse/clickhouse-java/blob/main/clickhouse-client/src/main/java/com/clickhouse/client/config/ClickHouseClientOption.java#L34C22-L34C37)) that will apply custom ch query settings for every query before the actual connection is created. The correct JDBC connection string will look like this: + +``` +jdbc:clickhouse://"${clickhouse.host}"/"${clickhouse.db}"?ssl=true&socket_timeout=3600000&socket_keepalive=true&custom_settings=send_progress_in_http_headers=1 +``` + +### Description + +- `http_send_timeout & send_timeout`: The timeout for sending data to the socket. If the server takes longer than this value to send data, the connection will be terminated (i.e., when the server pushes data to the client, and the client is not reading that for some reason). +- `http_receive_timeout & receive_timeout:` The timeout for receiving data from the socket. If the server takes longer than this value to receive the entire request from the client, the connection will be terminated. 
This setting ensures that the server is not kept waiting indefinitely for slow or unresponsive clients (i.e., the server tries to get some data from the client, but the client does not send anything). +- `http_connection_timeout & connect_timeout`: Defines how long ClickHouse should wait when it connects to another server. If the connection cannot be established within this time frame, it will be terminated. This does not impact the clients which connect to ClickHouse using HTTP (it only matters when ClickHouse works as a TCP/HTTP client). +- `keep_alive_timeout`: This is for 'Connection: keep-alive' in HTTP 1.1, only for HTTP. It defines how long ClickHouse can wait for the next request in the same connection to arrive after serving the previous one. It does not lead to any SOCKET_TIMEOUT exception, just closes the socket if the client doesn't start a new request after that time. + + + +- `sync_request_timeout` – timeout for server ping. Defaults to 5 seconds. + +In some cases, if the data sync request times out, it may be caused by many different reasons; basically it shouldn't take more than 5 seconds for a synchronous request-result protocol call (like Ping or TableStatus) in most of the normal circumstances, thus if the timeout setting is too long, e.g. 5 minutes or longer than that, then you will run into more overall performance issues. This is not good for any application on the server.
+ + +### How to check the current timeouts: + +```sql +SELECT + name, + value, + changed, + description +FROM system.settings +WHERE (name ILIKE '%send_timeout%') OR (name ILIKE '%receive_timeout%') OR (name ILIKE '%keep_alive%') OR (name ILIKE '%_http_headers') OR (name ILIKE 'http_headers_progres_%') OR (name ILIKE 'http_connection_%') +``` + diff --git a/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/_index.md b/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/_index.md index 5aea201bf1..f6fb8c35ec 100644 --- a/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/_index.md +++ b/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/_index.md @@ -6,7 +6,7 @@ description: > --- -Moving from a single ClickHouse server to a clustered format provides several benefits: +Moving from a single ClickHouse® server to a clustered format provides several benefits: * Replication guarantees data integrity. * Provides redundancy. 
diff --git a/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/cluster-configuration-faq.md b/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/cluster-configuration-faq.md index 7b208e1d43..45b4a4b483 100644 --- a/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/cluster-configuration-faq.md +++ b/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/cluster-configuration-faq.md @@ -4,9 +4,9 @@ linkTitle: "Cluster Configuration FAQ" description: > Cluster Configuration FAQ --- -## ClickHouse does not start, some other unexpected behavior happening +## ClickHouse® does not start, some other unexpected behavior happening -Check clickhouse logs, they are your friends: +Check ClickHouse logs, they are your friends: tail -n 1000 /var/log/clickhouse-server/clickhouse-server.err.log \| less tail -n 10000 /var/log/clickhouse-server/clickhouse-server.log \| less @@ -17,9 +17,9 @@ See [our knowledge base article]({{ Cluster Configuration Process --- -So you set up 3 nodes with zookeeper (zookeeper1, zookeeper2, zookeeper3 - [How to install zookeer?](https://docs.altinity.com/operationsguide/clickhouse-zookeeper/)), and and 4 nodes with ClickHouse (clickhouse-sh1r1,clickhouse-sh1r2,clickhouse-sh2r1,clickhouse-sh2r2 - [how to install ClickHouse?](https://docs.altinity.com/altinitystablerelease/stablequickstartguide/)). Now we need to make them work together. +So you set up 3 nodes with zookeeper (zookeeper1, zookeeper2, zookeeper3 - [How to install zookeeper?](https://docs.altinity.com/operationsguide/clickhouse-zookeeper/)), and and 4 nodes with ClickHouse® (clickhouse-sh1r1,clickhouse-sh1r2,clickhouse-sh2r1,clickhouse-sh2r2 - [how to install ClickHouse?](https://docs.altinity.com/altinitystablerelease/stablequickstartguide/)). Now we need to make them work together. 
Use ansible/puppet/salt or other systems to control the servers’ configurations. @@ -106,7 +106,7 @@ Engine=Distributed('{cluster}', 'default', ' #### **Hardening ClickHouse Security** -**See** [https://docs.altinity.com/operationsguide/security/clickhouse-hardening-guide/](https://docs.altinity.com/operationsguide/security/clickhouse-hardening-guide/) +**See** [https://docs.altinity.com/operationsguide/security/](https://docs.altinity.com/operationsguide/security/) ### Additional Settings diff --git a/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/hardening-clickhouse-security.md b/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/hardening-clickhouse-security.md index edd2e76778..a2acf8481c 100644 --- a/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/hardening-clickhouse-security.md +++ b/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/hardening-clickhouse-security.md @@ -6,15 +6,15 @@ description: > --- -ClickHouse is currently at the design stage of creating some universal backup solution. Some custom backup strategies are: +ClickHouse® is currently at the design stage of creating some universal backup solution. Some custom backup strategies are: 1. Each shard is backed up separately. 2. FREEZE the table/partition. For more information, see [Alter Freeze Partition](https://clickhouse.tech/docs/en/sql-reference/statements/alter/partition/#alter_freeze-partition). 1. This creates hard links in shadow subdirectory. 3. rsync that directory to a backup location, then remove that subfolder from shadow. 1. Cloud users are recommended to use [Rclone](https://rclone.org/). -4. Always add the full contents of the metadata subfolder that contains the current DB schema and clickhouse configs to your backup. +4. 
Always add the full contents of the metadata subfolder that contains the current DB schema and ClickHouse configs to your backup. 5. For a second replica, it’s enough to copy metadata and configuration. -6. Data in clickhouse is already compressed with lz4, backup can be compressed bit better, but avoid using cpu-heavy compression algorythms like gzip, use something like zstd instead. +6. Data in ClickHouse is already compressed with lz4, backup can be compressed bit better, but avoid using cpu-heavy compression algorithms like gzip, use something like zstd instead. -The tool automating that process [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup). +The tool automating that process: [Altinity Backup for ClickHouse](https://github.com/Altinity/clickhouse-backup). diff --git a/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/hardware-requirements.md b/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/hardware-requirements.md index d99785c776..48ffaf38c9 100644 --- a/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/hardware-requirements.md +++ b/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/hardware-requirements.md @@ -4,7 +4,7 @@ linkTitle: "Hardware Requirements" description: > Hardware Requirements --- -### **ClickHouse** +### ClickHouse® ClickHouse will use all available hardware to maximize performance. So the more hardware - the better. As of this publication, the hardware requirements are: @@ -16,7 +16,7 @@ ClickHouse will use all available hardware to maximize performance. So the more For clouds: disk throughput is the more important factor compared to IOPS. Be aware of burst / baseline disk speed difference. 
-See also: [https://clickhouse.tech/benchmark/hardware/](https://clickhouse.tech/benchmark/hardware/) +See also: [https://benchmark.clickhouse.com/hardware/](https://benchmark.clickhouse.com/hardware/) ### **Zookeeper** diff --git a/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/network-configuration.md b/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/network-configuration.md index 63cf15cd37..baa90c26c1 100644 --- a/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/network-configuration.md +++ b/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/network-configuration.md @@ -8,9 +8,9 @@ description: > ### **Networking And Server Room Planning** -The network used for your ClickHouse cluster should be a fast network, ideally 10 Gbit or more. +The network used for your ClickHouse® cluster should be a fast network, ideally 10 Gbit or more. ClickHouse nodes generate a lot of traffic to exchange the data between nodes (port 9009 for replication, and 9000 for distributed queries). -Zookeeper traffic in normal circumstanses is moderate, but in some special cases can also be very significant. +Zookeeper traffic in normal circumstances is moderate, but in some special cases can also be very significant. For the zookeeper low latency is more important than bandwidth. 
diff --git a/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/version-upgrades.md b/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/version-upgrades.md deleted file mode 100644 index cb990fa5e6..0000000000 --- a/content/en/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/version-upgrades.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: "Version Upgrades" -linkTitle: "Version Upgrades" -description: > - Version Upgrades ---- -Update itself is simple: update packages, restart clickhouse-server service afterwards. - -1. Check if the version you want to upgrade to is stable. We highly recommend the Altinity ClickHouse Stable Releases. - 1. Review the changelog to ensure that no configuration changes are needed. -2. Update staging and test to verify all systems are working. -3. Prepare and test downgrade procedures so the server can be returned to the previous version if necessary. -4. Start with a “canary” update. This is one replica with one shard that is upgraded to make sure that the procedure works. -5. Test and verify that everything works properly. Check for any errors in the log files. -6. If everything is working well, update the rest of the cluster. - -For small clusters, the [BlueGreenDeployment technique](https://martinfowler.com/bliki/BlueGreenDeployment.html) is also a good option. 
-**** diff --git a/content/en/altinity-kb-setup-and-maintenance/configure_clickhouse_for_low_mem_envs.md b/content/en/altinity-kb-setup-and-maintenance/configure_clickhouse_for_low_mem_envs.md new file mode 100644 index 0000000000..47936f7f0e --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/configure_clickhouse_for_low_mem_envs.md @@ -0,0 +1,91 @@ +--- +title: "Configure ClickHouse® for low memory environments" +linkTitle: "Configure ClickHouse® for low memory environments" +description: > + Configure ClickHouse® for low memory environments +--- + +While Clickhouse® it's typically deployed on powerful servers with ample memory and CPU, it can be deployed in resource-constrained environments like a Raspberry Pi. Whether you're working on edge computing, IoT data collection, or simply experimenting with ClickHouse in a small-scale setup, running it efficiently on low-memory hardware can be a rewarding challenge. + +TLDR; + +```xml + + + + + + + + + + + + false + + + 268435456 + 67108864 + 16777216 + + + 2000 + 64 + 8 + 0.75 + 0 + + + 2 + 2 + + 1024 + 1073741824 + 2 + 2 + 2 + + + + 1 + round_robin + 1 + 1 + 2 + 8 + 1 + 1 + 0 + 0 + +``` + +```xml + + + + + 2 + 8192 + 1000 + 600 + 0 + 0 + 3221225472 + 3221225472 + + + +``` + +Some interesting settings to explain: + +- Disabling both postgres/mysql interfaces will release some CPU/memory resources. +- Disabling some system tables like `processor_profile_log`, `opentelemetry_span_log`, or `query_thread_log` will help reducing write amplification. Those tables write a lot of data very frequently. In a Raspi4 with 4 GB of RAM and a simple USB3.1 storage they can spend some needed resources. +- Decrease mark caches. 
Defaults are 5GB and they are loaded into RAM (in newer versions this behavior of loading them completely in RAM can be tuned with a prewarm setting [https://github.com/ClickHouse/ClickHouse/pull/71053](https://github.com/ClickHouse/ClickHouse/pull/71053)) so better to reserve a reasonable amount of space in line with the total amount of RAM. For example for 4/8GB 256MB is a good value. +- Tune server memory and leave 25% for OS ops (`max_server_memory_usage_to_ram_ratio`) +- Tune the thread pools and queues for merges and mutations: + - `merge_max_block_size` will reduce the number of rows per block when merging. Default is 8192 and this will reduce the memory usage of merges. + - The `number_of_free_entries_in_pool` settings are very nice to tune how many concurrent merges are allowed in the queue. When there is less than the specified number of free entries in the pool, start to lower the maximum size of merge to process (or to put in queue) or do not execute part mutations to leave free threads for regular merges. This is to allow small merges to process - not filling the pool with long running merges or multiple mutations. You can check the ClickHouse documentation to get more insights. +- Reduce the background pools and be conservative. In a Raspi4 with 4 cores and 4 GB of RAM, the background pool should not be bigger than the number of cores and even less if possible. +- Tune some profile settings to enable disk spilling (`max_bytes_before_external_group_by` and `max_bytes_before_external_sort`) and reduce the number of threads per query plus enable queuing of queries (`queue_max_wait_ms`) if the `max_concurrent_queries` limit is exceeded. Also `max_block_size` is not usually touched but in this case we can lower it to reduce RAM usage. 
diff --git a/content/en/altinity-kb-setup-and-maintenance/connection-problems.md b/content/en/altinity-kb-setup-and-maintenance/connection-problems.md index a4d6116c90..203f1fe45a 100644 --- a/content/en/altinity-kb-setup-and-maintenance/connection-problems.md +++ b/content/en/altinity-kb-setup-and-maintenance/connection-problems.md @@ -1,17 +1,17 @@ --- -title: "Can not connect to my ClickHouse server" -linkTitle: "Can not connect to my ClickHouse server" +title: "Can not connect to my ClickHouse® server" +linkTitle: "Can not connect to my ClickHouse® server" weight: 100 description: >- - Can not connect to my ClickHouse server. + Can not connect to my ClickHouse® server. --- -## Can not connect to my ClickHouse server +## Can not connect to my ClickHouse® server Errors like "Connection reset by peer, while reading from socket" -1. Ensure that the clickhouse-server is running +1. Ensure that the `clickhouse-server` is running ```sh systemctl status clickhouse-server @@ -22,21 +22,21 @@ Errors like In case if you have a very high number of folders there (usually caused by a wrong partitioning, or a very high number of tables / databases) that startup time can take a lot of time (same can happen if disk is very slow, for example NFS). - You can check that by looking for 'Ready for connections' line in `/var/log/clickhouse-server/clickhouse-server.log` (`Information` log level neede) + You can check that by looking for 'Ready for connections' line in `/var/log/clickhouse-server/clickhouse-server.log` (`Information` log level needed) 2. Ensure you use the proper port ip / interface? Ensure you're not trying to connect to secure port without tls / https or vice versa. - For clickhouse-client - pay attention on host / port / secure flags. + For `clickhouse-client` - pay attention on host / port / secure flags. - Ensure the interface you're connecting to is the one which clickhouse listens (by default clickhouse listens only localhost). 
+ Ensure the interface you're connecting to is the one which ClickHouse listens (by default ClickHouse listens only localhost). - Note: If you uncomment line `0.0.0.0` only - clickhouse will listen only ipv4 interfaces, - while the localhost (used by clickhouse-client) may be resolved to ipv6 address. And clickhouse-client may be failing to connect. + Note: If you uncomment line `0.0.0.0` only - ClickHouse will listen only ipv4 interfaces, + while the localhost (used by `clickhouse-client`) may be resolved to ipv6 address. And `clickhouse-client` may be failing to connect. - How to check which interfaces / ports do clickhouse listen? + How to check which interfaces / ports do ClickHouse listen? ```sh sudo lsof -i -P -n | grep LISTEN @@ -59,14 +59,14 @@ Errors like 4. Check for errors in /var/log/clickhouse-server/clickhouse-server.err.log ? -5. Is clickhouse able to serve some trivial tcp / http requests from localhost? +5. Is ClickHouse able to serve some trivial tcp / http requests from localhost? ```sh curl 127.0.0.1:9200 curl 127.0.0.1:8123 ``` -6. Check number of sockets opened by clickhouse +6. Check number of sockets opened by ClickHouse ```sh sudo lsof -i -a -p $(pidof clickhouse-server) diff --git a/content/en/altinity-kb-setup-and-maintenance/custom_settings.md b/content/en/altinity-kb-setup-and-maintenance/custom_settings.md index 46c0ef2613..89d42037f2 100644 --- a/content/en/altinity-kb-setup-and-maintenance/custom_settings.md +++ b/content/en/altinity-kb-setup-and-maintenance/custom_settings.md @@ -8,7 +8,7 @@ description: >- ## Using custom settings in config -You can not use the custom settings in config file 'as is', because clickhouse don't know which datatype should be used to parse it. +You can not use the custom settings in config file 'as is', because ClickHouse® don't know which datatype should be used to parse it. 
```xml cat /etc/clickhouse-server/users.d/default_profile.xml diff --git a/content/en/altinity-kb-setup-and-maintenance/disk_encryption.md b/content/en/altinity-kb-setup-and-maintenance/disk_encryption.md new file mode 100644 index 0000000000..87d0d554dc --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/disk_encryption.md @@ -0,0 +1,173 @@ +--- +title: "ClickHouse® data/disk encryption (at rest)" +linkTitle: "disk encryption" +weight: 100 +description: >- + Example how to encrypt data in tables using storage policies. +--- + +## Create folder + +``` +mkdir /data/clickhouse_encrypted +chown clickhouse.clickhouse /data/clickhouse_encrypted +``` + +## Configure encrypted disk and storage + +* https://clickhouse.com/docs/en/operations/storing-data/#encrypted-virtual-file-system +* https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server-settings-encryption + + +```xml +cat /etc/clickhouse-server/config.d/encrypted_storage.xml + + + + + local + /data/clickhouse_encrypted/ + + + encrypted + disk1 + encrypted/ + AES_128_CTR + 00112233445566778899aabbccddeeff + 0 + + + + + + + encrypted_disk + + + + + + +``` + +```bash +systemctl restart clickhouse-server +``` + +```sql +select name, path, type, is_encrypted from system.disks; +┌─name───────────┬─path──────────────────────────────────┬─type──┬─is_encrypted─┐ +│ default │ /var/lib/clickhouse/ │ local │ 0 │ +│ disk1 │ /data/clickhouse_encrypted/ │ local │ 0 │ +│ encrypted_disk │ /data/clickhouse_encrypted/encrypted/ │ local │ 1 │ +└────────────────┴───────────────────────────────────────┴───────┴──────────────┘ + +select * from system.storage_policies; +┌─policy_name─┬─volume_name──────┬─volume_priority─┬─disks──────────────┬─volume_type─┬─max_data_part_size─┬─move_factor─┬─prefer_not_to_merge─┐ +│ default │ default │ 1 │ ['default'] │ JBOD │ 0 │ 0 │ 0 │ +│ encrypted │ encrypted_volume │ 1 │ ['encrypted_disk'] │ JBOD │ 0 │ 0 │ 0 │ 
+└─────────────┴──────────────────┴─────────────────┴────────────────────┴─────────────┴────────────────────┴─────────────┴─────────────────────┘ +``` + +## Create table + +```sql +CREATE TABLE bench_encrypted(c_int Int64, c_str varchar(255), c_float Float64) +engine=MergeTree order by c_int +settings storage_policy = 'encrypted'; +``` + +```bash +cat /data/clickhouse_encrypted/encrypted/store/906/9061167e-d5f7-45ea-8e54-eb6ba3b678dc/format_version.txt +ENC�AdruM�˪h"��^� +``` + +# Compare performance of encrypted and not encrypted tables + +```sql +CREATE TABLE bench_encrypted(c_int Int64, c_str varchar(255), c_float Float64) +engine=MergeTree order by c_int +settings storage_policy = 'encrypted'; + +insert into bench_encrypted +select toInt64(cityHash64(number)), lower(hex(MD5(toString(number)))), number/cityHash64(number)*10000000 +from numbers_mt(100000000); + +0 rows in set. Elapsed: 33.357 sec. Processed 100.66 million rows, 805.28 MB (3.02 million rows/s., 24.14 MB/s.) + + +CREATE TABLE bench_unencrypted(c_int Int64, c_str varchar(255), c_float Float64) +engine=MergeTree order by c_int; + +insert into bench_unencrypted +select toInt64(cityHash64(number)), lower(hex(MD5(toString(number)))), number/cityHash64(number)*10000000 +from numbers_mt(100000000); + +0 rows in set. Elapsed: 31.175 sec. Processed 100.66 million rows, 805.28 MB (3.23 million rows/s., 25.83 MB/s.) + + +select avg(c_float) from bench_encrypted; +1 row in set. Elapsed: 0.195 sec. Processed 100.00 million rows, 800.00 MB (511.66 million rows/s., 4.09 GB/s.) + +select avg(c_float) from bench_unencrypted; +1 row in set. Elapsed: 0.150 sec. Processed 100.00 million rows, 800.00 MB (668.71 million rows/s., 5.35 GB/s.) + + +select sum(c_int) from bench_encrypted; +1 row in set. Elapsed: 0.281 sec. Processed 100.00 million rows, 800.00 MB (355.74 million rows/s., 2.85 GB/s.) + +select sum(c_int) from bench_unencrypted; +1 row in set. Elapsed: 0.193 sec. 
Processed 100.00 million rows, 800.00 MB (518.88 million rows/s., 4.15 GB/s.) + + +set max_threads=1; + +select avg(c_float) from bench_encrypted; +1 row in set. Elapsed: 0.934 sec. Processed 100.00 million rows, 800.00 MB (107.03 million rows/s., 856.23 MB/s.) + +select avg(c_float) from bench_unencrypted; +1 row in set. Elapsed: 0.874 sec. Processed 100.00 million rows, 800.00 MB (114.42 million rows/s., 915.39 MB/s.) +``` + +## read key_hex from environment variable + +* https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings/#server-settings-encryption +* https://serverfault.com/questions/413397/how-to-set-environment-variable-in-systemd-service + +```xml +cat /etc/clickhouse-server/config.d/encrypted_storage.xml + + + + + local + /data/clickhouse_encrypted/ + + + encrypted + disk1 + encrypted/ + AES_128_CTR + + + + + + + + encrypted_disk + + + + + + + +cat /etc/default/clickhouse-server +DiskKey=00112233445566778899aabbccddeeff +``` + +```bash +systemctl restart clickhouse-server +``` + diff --git a/content/en/altinity-kb-setup-and-maintenance/dr-two-dc.md b/content/en/altinity-kb-setup-and-maintenance/dr-two-dc.md new file mode 100644 index 0000000000..35e4a301fe --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/dr-two-dc.md @@ -0,0 +1,84 @@ +--- +title: "DR two DC" +linkTitle: "DR two DC" +weight: 100 +description: >- + Disaster Recovery configuration between two data centers +--- + +Clickhouse uses Keeper (or ZooKeeper) to inform other cluster nodes about changes. Clickhouse nodes then fetch new parts directly from other nodes in the cluster. The Keeper cluster is a key for building a DR schema. You can consider Keeper a “true” cluster while clickhouse-server nodes as storage access instruments. 
+ +To implement a disaster recovery (DR) setup for ClickHouse across two physically separated data centers (A and B), with only one side active at a time, you can create a single ClickHouse cluster spanning both data centers. This setup will address data synchronization, replication, and coordination needs. + +## Cluster Configuration + +1. Create a single ClickHouse cluster with nodes in both data centers. +2. Configure the appropriate number of replicas and shards based on your performance and redundancy requirements. +3. Use ClickHouse Keeper or ZooKeeper for cluster coordination (see Keeper flavors discussion below). + +## Data Synchronization and Replication + +1. ClickHouse replicas operate in a master-master configuration, eliminating the need for a separate slave approach. +2. Configure replicas across both data centers to ensure data synchronization. +3. While both DCs have active replicas, consider DC B replicas as "passive" from the application's perspective. + +### Example Configuration: + +```xml + + + + + ch1.dc-a.company.com + + + ch2.dc-a.company.com + + + ch1.dc-b.company.com + + + ch2.dc-b.company.com + + + + + +``` + +## Keeper Setup + +1. In the active data center (DC A): + - Deploy 3 active Keeper nodes +2. In the passive data center (DC B): + - Deploy 1 Keeper node in observer role + +### Failover Process: + +In case of a failover: + +1. Shut down the ClickHouse cluster in DC A completely +2. Manually switch Keeper in DC B from observer to active participant (restart needed). +3. Create two additional Keeper nodes (they will replicate the state automatically). +4. Add two additional Keeper nodes to clickhouse configs + +## ClickHouse Keeper vs. ZooKeeper + +While ClickHouse Keeper is generally preferable for very high-load scenarios, ZooKeeper remains a viable option for many deployments. + +Considerations: + +- ClickHouse Keeper is optimized for ClickHouse operations and can handle higher loads. 
+- ZooKeeper is well-established and works well for many clients. + +The choice between ClickHouse Keeper and ZooKeeper is more about the overall system architecture and load patterns. + +## Configuration Synchronization + +To keep configurations in sync: + +1. Use ON CLUSTER clause for DDL statements +2. Store RBAC objects in Keeper +3. Implement a configuration management system (e.g., Ansible, Puppet) to simultaneously apply changes to ClickHouse configuration files in config.d + + diff --git a/content/en/altinity-kb-setup-and-maintenance/executable-dictionary.md b/content/en/altinity-kb-setup-and-maintenance/executable-dictionary.md new file mode 100644 index 0000000000..1ec73617f3 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/executable-dictionary.md @@ -0,0 +1,94 @@ +--- +title: "Use an executable dictionary as cron task" +linkTitle: "Use an executable dictionary as cron task" +weight: 100 +description: > + If you need to execute scheduled tasks, you can use an executable dictionary like it was a cron task. +--- + +### Rationale + +Imagine that we need to restart clickhouse-server every Saturday at 10:00 AM. We can use an executable dictionary to do this. Here is the approach and code necessary to do this. It can be used for other operations like INSERT into tables or execute some other imaginative tasks that need a scheduled execution. + +Let's create a simple table to register all the restarts scheduled by this dictionary: + + +```sql +CREATE TABLE restart_table +( + restart_datetime DateTime +) +ENGINE = TinyLog +``` + +### Configuration + +This is the ClickHouse configuration file we will be using for executable dictionaries. The dictionary is a dummy one (ignore the format and other stuff, we need format in the dict definition because if not it will fail loading), we don’t need it to do anything, just execute a script that has all the logic. 
The scheduled time is defined in the LIFETIME property of the dictionary (every 5 minutes dictionary will be refreshed and subsequently the script executed). Also for this case we need to load it on startup time setting lazy loading of dicts to false. + +```xml + + + /etc/clickhouse-server/config.d/*_dict.xml + false + + restart_dict + + + restart_id + UInt64 + + + + + restart_dict.sh + true + CSV + + + + + + 300 + + +``` + + + +### Action + +Now the restart logic (which can be different for other needs). In this case it will do nothing until the restart windows comes. During the restart window, we check if there has been a restart in the same window timeframe (if window is an hour the condition should be 1h). The script will issue a `SYSTEM SHUTDOWN` command to restart the server. The script will also insert a record in the restart_table to register the restart time. + +```bash +#!/bin/bash + +CLICKHOUSE_USER="admin" +CLICKHOUSE_PASSWORD="xxxxxxxxx" + +# Check if today is Saturday and the time is 10:00 AM CET or later +# Get current day of week (1-7, where 7 is Sunday) +# reload time for dict is 300 secs / 10 mins +current_day=$(date +%u) +# Get current time in hours and minutes +current_time=$(date +%H%M) + +# Check if today is Saturday (6) and the time is between 10:00 AM and 11:00 AM +if [[ $current_day -eq 6 && $current_time -ge 1000 && $current_time -lt 1100 ]]; then + # Get current date and time as timestamp + current_timestamp=$(date +%s) + last_restart_timestamp=$(clickhouse-client --user $CLICKHOUSE_USER --password $CLICKHOUSE_PASSWORD --query "SELECT max(toUnixTimestamp(restart_datetime)) FROM restart_table") + # Check if the last restart timestamp is within last hour, if not then restart + if [[ $(( current_timestamp - last_restart_timestamp )) -ge 3600 ]]; then + # Push data to log table and restart + echo $current_timestamp | clickhouse-client --user $CLICKHOUSE_USER --password $CLICKHOUSE_PASSWORD --query "INSERT INTO restart_table FORMAT TSVRaw" + 
clickhouse-client --user $CLICKHOUSE_USER --password $CLICKHOUSE_PASSWORD --query "SYSTEM SHUTDOWN" + fi +fi +``` + +### Improvements + +If the dictionary has a high frequency refresh time, then ClickHouse could end up executing that script multiple times using a lot of resources and creating processes that can look like 'stuck' ones. +To overcome this we can use the executable pool setting: https://clickhouse.com/docs/sql-reference/dictionaries#executable-pool + +Executable pool will spawn a pool of processes (similar to a pool of connections) with the specified command and keep them running until they exit, which is useful for heavy scripts/python and reduces the initialization impact of those on ClickHouse. diff --git a/content/en/altinity-kb-setup-and-maintenance/filesystems.md b/content/en/altinity-kb-setup-and-maintenance/filesystems.md index a7aaf12a4a..2b5cf0bbc0 100644 --- a/content/en/altinity-kb-setup-and-maintenance/filesystems.md +++ b/content/en/altinity-kb-setup-and-maintenance/filesystems.md @@ -1,20 +1,18 @@ --- -title: "ClickHouse and different filesystems" -linkTitle: "ClickHouse and different filesystems" +title: "ClickHouse® and different filesystems" +linkTitle: "ClickHouse® and different filesystems" weight: 100 description: >- - ClickHouse and different filesystems. + ClickHouse® and different filesystems. --- -## ClickHouse and different filesystems - -In general ClickHouse should work with any POSIX-compatible filesystem. +In general ClickHouse® should work with any POSIX-compatible filesystem. * hard links and soft links support is mandatory. -* clickhouse can use O_DIRECT mode to bypass the cache (and async io) -* clickhouse can use renameat2 command for some atomic operations (not all the filesystems support that). +* ClickHouse can use O_DIRECT mode to bypass the cache (and async io) +* ClickHouse can use renameat2 command for some atomic operations (not all the filesystems support that). 
* depending on the schema and details of the usage the filesystem load can vary between the setup. The most natural load - is high throughput, with low or moderate IOPS. -* data is compressed in clickhouse (LZ4 by default), while indexes / marks / metadata files - no. Enabling disk-level compression can sometimes improve the compression, but can affect read / write speed. +* data is compressed in ClickHouse (LZ4 by default), while indexes / marks / metadata files - no. Enabling disk-level compression can sometimes improve the compression, but can affect read / write speed. ### ext4 @@ -33,7 +31,7 @@ We don't have real proofs/benchmarks though, example reports: others and they found that they accidentally set up those servers with XFS instead of Ext4. * in the system journal you can sometimes see reports like 'task XYZ blocked for more than 120 seconds' and stack trace pointing to XFS code (example: https://gist.github.com/filimonov/85b894268f978c2ccc18ea69bae5adbd ) * system goes to 99% io kernel under load sometimes. -* we have XFS, sometimes clickhouse goes to "sleep" because XFS daemon is doing smth unknown +* we have XFS, sometimes ClickHouse goes to "sleep" because XFS daemon is doing smth unknown Maybe the above problem can be workaround by some tuning/settings, but so far we do not have a working and confirmed way to do this. @@ -43,21 +41,19 @@ Limitations exist, extra tuning may be needed, and having more RAM is recommende Memory usage control - ZFS adaptive replacement cache (ARC) can take a lot of RAM. It can be the reason of out-of-memory issues when memory is also requested by the ClickHouse. -* It seems that the most important thing is zfs_arc_max - you just need to limit the maximum size of the ARC so that the sum of the maximum size of the arc + the CH itself does not exceed the size of the available RAM. For example, we set a limit of 80% RAM for Clickhouse and 10% for ARC. 
10% will remain for the system and other applications +* It seems that the most important thing is zfs_arc_max - you just need to limit the maximum size of the ARC so that the sum of the maximum size of the arc + the CH itself does not exceed the size of the available RAM. For example, we set a limit of 80% RAM for ClickHouse and 10% for ARC. 10% will remain for the system and other applications Tuning: * another potentially interesting setting is primarycache=metadata, see benchmark example: https://www.ikus-soft.com/en/blog/2018-05-23-proxmox-primarycache-all-metadata/ * examples of tuning ZFS for MySQL https://wiki.freebsd.org/ZFSTuningGuide - perhaps some of this can also be useful (atime, recordsize) but everything needs to be carefully checked with benchmarks (I have no way). -* best practices - * https://efim360.ru/zfs-best-practices-guide/ - * https://pthree.org/2012/12/13/zfs-administration-part-viii-zpool-best-practices-and-caveats/ - -**important note**: ZFS does not support the `renameat2` command, which is used by the Atomic database engine, and +* best practices: https://efim360.ru/zfs-best-practices-guide/ + +**important note**: In versions before 2.2 ZFS does not support the `renameat2` command, which is used by the Atomic database engine, and therefore some of the Atomic functionality will not be available. -In old versions of clickhouse, you can face issues with the O_DIRECT mode. +In old versions of ClickHouse, you can face issues with the O_DIRECT mode. -Also there is a well-known (and controversional) Linus Torvalds opinion: "Don't Use ZFS on Linux" [[1]](https://www.realworldtech.com/forum/?threadid=189711&curpostid=189841), [[2]](https://arstechnica.com/gadgets/2020/01/linus-torvalds-zfs-statements-arent-right-heres-the-straight-dope/), [[3]](https://arstechnica.com/gadgets/2020/01/linus-torvalds-zfs-statements-arent-right-heres-the-straight-dope/). 
+Also there is a well-known (and controversial) Linus Torvalds opinion: "Don't Use ZFS on Linux" [[1]](https://www.realworldtech.com/forum/?threadid=189711&curpostid=189841), [[2]](https://arstechnica.com/gadgets/2020/01/linus-torvalds-zfs-statements-arent-right-heres-the-straight-dope/), [[3]](https://arstechnica.com/gadgets/2020/01/linus-torvalds-zfs-statements-arent-right-heres-the-straight-dope/). ### BTRFS @@ -72,11 +68,11 @@ Not enough information. There are reports that some people successfully use it in their setups. A fast network is required. -There were some reports about data damage on the disks on older clickhouse versions, which could be caused by the issues with O_DIRECT or [async io support](https://lustre-discuss.lustre.narkive.com/zwcvyEEY/asynchronous-posix-i-o-with-lustre) on Lustre. +There were some reports about data damage on the disks on older ClickHouse versions, which could be caused by the issues with O_DIRECT or [async io support](https://lustre-discuss.lustre.narkive.com/zwcvyEEY/asynchronous-posix-i-o-with-lustre) on Lustre. ### NFS (and EFS) -Accouding to the reports - it works, throughput depends a lot on the network speed. IOPS / number of file operations per seconds can be super low (due to the locking mechanism). +According to the reports - it works, throughput depends a lot on the network speed. IOPS / number of file operations per seconds can be super low (due to the locking mechanism). 
https://github.com/ClickHouse/ClickHouse/issues/31113 diff --git a/content/en/altinity-kb-setup-and-maintenance/high-cpu-usage.md b/content/en/altinity-kb-setup-and-maintenance/high-cpu-usage.md index 0d4db8f55a..fcf85417e7 100644 --- a/content/en/altinity-kb-setup-and-maintenance/high-cpu-usage.md +++ b/content/en/altinity-kb-setup-and-maintenance/high-cpu-usage.md @@ -1,10 +1,14 @@ --- -title: "High CPU usage" +title: "High CPU usage in ClickHouse®" linkTitle: "High CPU usage" description: > - High CPU usage + Getting CPU usage under control +keywords: + - clickhouse high cpu usage + - clickhouse cpu + - clickhouse cpu usage --- -In general, it is a NORMAL situation for clickhouse that while processing a huge dataset it can use a lot of (or all of) the server resources. It is 'by design' - just to make the answers faster. +In general, it is a NORMAL situation for ClickHouse® that while processing a huge dataset it can use a lot of (or all of) the server resources. It is 'by design' - just to make the answers faster. The main directions to reduce the CPU usage **is to review the schema / queries** to limit the amount of the data which need to be processed, and to plan the resources in a way when single running query will not impact the others. diff --git a/content/en/altinity-kb-setup-and-maintenance/how_to_recreate_table.md b/content/en/altinity-kb-setup-and-maintenance/how_to_recreate_table.md new file mode 100644 index 0000000000..bc8a1f722a --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/how_to_recreate_table.md @@ -0,0 +1,48 @@ +--- +title: "How to recreate a table in case of total corruption of the replication queue" +linkTitle: "How to recreate a table" +weight: 100 +description: >- + How to recreate a table in case of total corruption of the replication queue. +--- + +## How to fix a replication using hard-reset way + +1. Find the best replica (replica with the most fresh/consistent) data. +2. 
Backup the table `alter table mydatabase.mybadtable freeze;` +3. Stop all applications!!! Stop ingestion. Stop queries - table will be empty for some time. +4. Check that detached folder is empty or clean it. +```sql +SELECT concat('alter table ', database, '.', table, ' drop detached part \'', name, '\' settings allow_drop_detached=1;') +FROM system.detached_parts +WHERE (database = 'mydatabase') AND (table = 'mybadtable') +FORMAT TSVRaw; +``` +5. Make sure that detached folder is empty `select count() from system.detached_parts where database='mydatabase' and table ='mybadtable';` +6. Detach all parts (table will become empty) +```sql +SELECT concat('alter table ', database, '.', table, ' detach partition id \'', partition_id, '\';') AS detach +FROM system.parts +WHERE (active = 1) AND (database = 'mydatabase') AND (table = 'mybadtable') +GROUP BY detach +ORDER BY detach ASC +FORMAT TSVRaw; +``` +7. Make sure that table is empty `select count() from mydatabase.mybadtable;` +8. Attach all parts back +```sql +SELECT concat('alter table ', database, '.', table, ' attach part \'', a.name, '\';') +FROM system.detached_parts AS a +WHERE (database = 'mydatabase') AND (table = 'mybadtable') +FORMAT TSVRaw; +``` +9. 
Make sure that data is consistent at all replicas +```sql +SELECT + formatReadableSize(sum(bytes)) AS size, + sum(rows), + count() AS part_count, + uniqExact(partition) AS partition_count +FROM system.parts +WHERE (active = 1) AND (database = 'mydatabase') AND (table = 'mybadtable'); +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/jemalloc_heap_profiling.md b/content/en/altinity-kb-setup-and-maintenance/jemalloc_heap_profiling.md new file mode 100644 index 0000000000..bb6c0eb383 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/jemalloc_heap_profiling.md @@ -0,0 +1,219 @@ +--- +title: "Jemalloc heap profiling" +linkTitle: "Jemalloc heap profiling" +weight: 100 +description: >- + Example of .xml config to enable remote pprof style access +--- + +## Config + +```xml + + + /etc/clickhouse-server/config.d/*_dict.xml + + + /pprof/heap + GET,POST + + static + file://jemalloc_clickhouse.heap + + + + /pprof/cmdline + GET + + predefined_query_handler + SELECT '/var/lib/clickhouse' FORMAT TSVRaw + + + + /pprof/symbol + GET + + predefined_query_handler + SELECT 'num_symbols: ' || count() FROM system.symbols FORMAT TSVRaw SETTINGS allow_introspection_functions = 1 + + + + /pprof/symbol + POST + + predefined_query_handler + WITH arrayJoin(splitByChar('+', {_request_body:String})) as addr SELECT addr || ' ' || demangle(addressToSymbol(reinterpretAsUInt64(reverse(substr(unhex(addr),2))))) SETTINGS allow_introspection_functions = 1 FORMAT TSVRaw + + + + + + jemalloc_ls + + + + id + String + + + + file + String + + + + size + UInt32 + + + + time + DateTime + + + + + + for f in /tmp/jemalloc_clickhouse.*; do [ -f "$f" ] || continue; echo -e "$(basename "$f" | cut -d. 
-f2-3)\t$f\t$(stat -c%s "$f")\t$(stat -c%Y "$f")"; done + false + TSV + + + + + + 300 + + + jemalloc_cp + + + id + UInt32 + + + status + UInt32 + + + + + + ver=${1:-$(head -n1 | tr -d "[:space:]")}; file=$(ls -t -- /tmp/jemalloc_clickhouse.*."$ver".heap 2>/dev/null | head -n1); if [ -n "$file" ] && cp -- "$file" /var/lib/clickhouse/user_files/jemalloc_clickhouse.heap; then printf '1\t\n'; else printf '0\t\n'; fi + false + TSV + + + + + + 300 + + +``` + +```sh +$ curl https://user:password@cluster.env.altinity.cloud:8443/pprof/cmdline +/var/lib/clickhouse + +$ curl https://user:password@cluster.env.altinity.cloud:8443/pprof/symbol +num_symbols: 702648 + +$ curl -d '0x0F99B044+0x008512D0' https://user:password@cluster.env.altinity.cloud:8443/pprof/symbol +0x0F99B044 DB::StorageSystemFilesystemCache::getColumnsDescription() +0x008512D0 icudt75_dat +``` + +```sql +cluster :) SYSTEM JEMALLOC ENABLE PROFILE; + +SYSTEM JEMALLOC ENABLE PROFILE + +Ok. + +0 rows in set. Elapsed: 0.270 sec. + +cluster :) SELECT uniqExact(number) FROM numbers_mt(1000000000); + +SELECT uniqExact(number) +FROM numbers_mt(1000000000) + +┌─uniqExact(number)─┐ +│ 1000000000 │ -- 1.00 billion +└───────────────────┘ + +1 row in set. Elapsed: 6.585 sec. Processed 1.00 billion rows, 8.00 GB (151.86 million rows/s., 1.21 GB/s.) +Peak memory usage: 25.19 GiB. + +cluster :) SYSTEM JEMALLOC FLUSH PROFILE; + +SYSTEM JEMALLOC FLUSH PROFILE + +Ok. + +0 rows in set. Elapsed: 0.272 sec. 
+ +cluster :) SELECT * FROM dictionary('jemalloc_ls'); + +SELECT * +FROM dictionary('jemalloc_ls') + +┌─id─────┬─file──────────────────────────────┬───size─┬────────────────time─┐ +│ │ │ 0 │ 1970-01-01 00:00:00 │ +│ -e 8.0 │ /tmp/jemalloc_clickhouse.8.0.heap │ 108004 │ 2025-09-01 00:44:13 │ +│ -e 8.1 │ /tmp/jemalloc_clickhouse.8.1.heap │ 111115 │ 2025-09-01 00:46:46 │ +│ -e 8.2 │ /tmp/jemalloc_clickhouse.8.2.heap │ 128098 │ 2025-09-01 00:47:07 │ +│ -e 8.3 │ /tmp/jemalloc_clickhouse.8.3.heap │ 123980 │ 2025-09-01 00:48:14 │ +│ -e 8.4 │ /tmp/jemalloc_clickhouse.8.4.heap │ 124230 │ 2025-09-01 00:48:15 │ +│ -e 8.5 │ /tmp/jemalloc_clickhouse.8.5.heap │ 117733 │ 2025-09-01 12:18:53 │ +└────────┴───────────────────────────────────┴────────┴─────────────────────┘ + +7 rows in set. Elapsed: 0.021 sec. + +cluster :) SELECT dictGet('jemalloc_cp', 'status', 4); + +SELECT dictGet('jemalloc_cp', 'status', 4) + +┌─dictGet('jem⋯status', 4)─┐ +│ 0 │ +└──────────────────────────┘ + +1 row in set. Elapsed: 0.014 sec. +``` + +```sh +$ jeprof --svg https://user:password@cluster.env.altinity.cloud:8443/pprof/heap > ./mem.svg +Fetching /pprof/heap profile from https://user:password@cluster.env.altinity.cloud:8443/pprof/heap to + /home/user/jeprof/clickhouse.1756728952.user.pprof.heap +Wrote profile to /home/user/jeprof/clickhouse.1756728952.user.pprof.heap +Dropping nodes with <= 90.7 MB; edges with <= 18.1 abs(MB) +``` + +```sql +cluster :) SELECT dictGet('jemalloc_cp', 'status', 5); + +SELECT dictGet('jemalloc_cp', 'status', 5) + +┌─dictGet('jem⋯status', 5)─┐ +│ 0 │ +└──────────────────────────┘ + +1 row in set. Elapsed: 0.014 sec. 
+``` + +```sh +$ jeprof --svg https://user:password@cluster.env.altinity.cloud:8443/pprof/heap --base /home/user/jeprof/clickhouse.1756728952.user.pprof.heap > ./mem_diff.svg +Fetching /pprof/heap profile from https://user:password@cluster.env.altinity.cloud:8443/pprof/heap to + /home/user/jeprof/clickhouse.1756729237.user.pprof.heap +Wrote profile to /home/user/jeprof/clickhouse.1756729237.user.pprof.heap +``` + +``` +cluster :) SYSTEM JEMALLOC DISABLE PROFILE; + +SYSTEM JEMALLOC DISABLE PROFILE + +Ok. + +0 rows in set. Elapsed: 0.271 sec. +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/load-balancers.md b/content/en/altinity-kb-setup-and-maintenance/load-balancers.md index f751d991af..8274175b44 100644 --- a/content/en/altinity-kb-setup-and-maintenance/load-balancers.md +++ b/content/en/altinity-kb-setup-and-maintenance/load-balancers.md @@ -6,27 +6,27 @@ description: > --- In general - one of the simplest option to do load balancing is to implement it on the client side. -I.e. list several endpoints for clickhouse connections and add some logic to pick one of the nodes. +I.e. list several endpoints for ClickHouse® connections and add some logic to pick one of the nodes. Many client libraries support that. ## ClickHouse native protocol (port 9000) -Currently there are no protocol-aware proxies for clickhouse protocol, so the proxy / load balancer can work only on TCP level. +Currently there are no protocol-aware proxies for ClickHouse protocol, so the proxy / load balancer can work only on TCP level. One of the best option for TCP load balancer is haproxy, also nginx can work in that mode. Haproxy will pick one upstream when connection is established, and after that it will keep it connected to the same server until the client or server will disconnect (or some timeout will happen). 
-It can’t send different queries coming via a single connection to different servers, as he knows nothing about clickhouse protocol and doesn't know when one query ends and another start, it just sees the binary stream. +It can’t send different queries coming via a single connection to different servers, as it knows nothing about ClickHouse protocol and doesn't know when one query ends and another starts, it just sees the binary stream. So for native protocol, there are only 3 possibilities: 1) close connection after each query client-side 2) close connection after each query server-side (currently there is only one setting for that - idle_connection_timeout=0, which is not exact what you need, but similar). -3) use a clickhouse server with Distributed table as a proxy. +3) use a ClickHouse server with Distributed table as a proxy. ## HTTP protocol (port 8123) There are many more options and you can use haproxy / nginx / chproxy, etc. -chproxy give some extra clickhouse-specific features, you can find a list of them at [https://github.com/Vertamedia/chproxy](https://github.com/Vertamedia/chproxy) +chproxy gives some extra ClickHouse-specific features, you can find a list of them at [https://chproxy.org](https://chproxy.org) diff --git a/content/en/altinity-kb-setup-and-maintenance/logging.md b/content/en/altinity-kb-setup-and-maintenance/logging.md index f262bdad98..ec8a481e6a 100644 --- a/content/en/altinity-kb-setup-and-maintenance/logging.md +++ b/content/en/altinity-kb-setup-and-maintenance/logging.md @@ -30,6 +30,6 @@ df -Th df -Thi ``` -Q. How to configure logging in clickhouse? +Q. How to configure logging in ClickHouse®? A. 
See [https://github.com/ClickHouse/ClickHouse/blob/ceaf6d57b7f00e1925b85754298cf958a278289a/programs/server/config.xml\#L9-L62](https://github.com/ClickHouse/ClickHouse/blob/ceaf6d57b7f00e1925b85754298cf958a278289a/programs/server/config.xml#L9-L62) diff --git a/content/en/altinity-kb-setup-and-maintenance/metric_log_ram.md b/content/en/altinity-kb-setup-and-maintenance/metric_log_ram.md new file mode 100644 index 0000000000..30e5e1943b --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/metric_log_ram.md @@ -0,0 +1,177 @@ +--- +title: "High Memory Usage During Merge in system.metric_log" +linkTitle: "Merge Memory in metric_log" +weight: 100 +description: >- + Resolving excessive memory consumption during merges in the ClickHouse® system.metric_log table. +--- + +## Overview + +In recent versions of ClickHouse®, the **merge process (part compaction)** in the `system.metric_log` table can consume a large amount of memory. +The issue arises due to an **unfortunate combination of settings**, where: + +* the merge is already large enough to produce **wide parts**, +* but not yet large enough to enable **vertical merges**. + +This problem has become more pronounced in newer ClickHouse® versions because the `system.metric_log` table has **expanded significantly** — many new metrics were added, increasing the total number of columns. + +> **Wide vs Compact** — storage formats for table parts: +> * *Wide* — each column is stored in a separate file (more efficient for large datasets). +> * *Compact* — all data is stored in a single file (more efficient for small inserts). +> +> **Horizontal vs Vertical merge** — algorithms for combining data during merges: +> * *Horizontal merge* reads and merges all columns at once — meaning all files are opened simultaneously, and buffers are allocated for each column and each part. +> * *Vertical merge* processes columns in batches — first merging only columns from `ORDER BY`, then the rest one by one. 
This approach **significantly reduces memory usage**. + +The most memory-intensive scenario is a **horizontal merge of wide parts** in a table with a large number of columns. + +--- + +## Demonstrating the Problem + +The issue can be reproduced easily by adjusting a few settings: + +```sql +ALTER TABLE system.metric_log MODIFY SETTING min_bytes_for_wide_part = 100; +OPTIMIZE TABLE system.metric_log FINAL; +```` + +Example log output: + +``` +[c9d66aa9f9d1] 2025.11.10 10:04:59.091067 [97] MemoryTracker: Background process (mutate/merge) peak memory usage: 6.00 GiB. +``` + +**The merge consumed 6 GB of memory** — far too much for this table. + +--- + +## Vertical Merges Are Not Affected + +If you explicitly force vertical merges, memory consumption normalizes, although the process becomes slightly slower: + +```sql +ALTER TABLE system.metric_log MODIFY SETTING + min_bytes_for_wide_part = 100, + vertical_merge_algorithm_min_rows_to_activate = 1; + +OPTIMIZE TABLE system.metric_log FINAL; +``` + +Example log output: + +``` +[c9d66aa9f9d1] 2025.11.10 10:06:14.575832 [97] MemoryTracker: Background process (mutate/merge) peak memory usage: 13.98 MiB. +``` + +Now memory usage **drops from 6 GB to only 14 MB**. + +--- + +## Root Cause + +The problem stems from the fact that: + +* the threshold for enabling *wide* parts is configured in **bytes** (`min_bytes_for_wide_part`); +* while the threshold for enabling *vertical merges* is configured in **rows** (`vertical_merge_algorithm_min_rows_to_activate`). + +When a table contains very **wide rows** (many lightweight columns), this mismatch causes wide parts to appear too early, while vertical merges are triggered much later. 
+ +--- + +## Default Settings + +| Parameter | Value | +| ------------------------------------------------ | ---------------- | +| `vertical_merge_algorithm_min_rows_to_activate` | 131072 | +| `vertical_merge_algorithm_min_bytes_to_activate` | 0 | +| `min_bytes_for_wide_part` | 10485760 (10 MB) | +| `min_rows_for_wide_part` | 0 | + +The average row size in `metric_log` is approximately **2.8 KB**, meaning wide parts are created after roughly: + +``` +10485760 / 2800 ≈ 3744 rows +``` + +Meanwhile, the vertical merge algorithm activates only after **131 072 rows** — much later. + +--- + +## Possible Solutions + +1. **Increase `min_bytes_for_wide_part`** + For example, set it to at least `2800 * 131072 ≈ 350 MB`. + This delays the switch to the wide format until vertical merges can also be used. + +2. **Switch to a row-based threshold** + Use `min_rows_for_wide_part` instead of `min_bytes_for_wide_part`. + +3. **Lower the threshold for vertical merges** + Reduce `vertical_merge_algorithm_min_rows_to_activate`, + or add a value for `vertical_merge_algorithm_min_bytes_to_activate`. + +--- + +## Example Local Fix for `metric_log` + +Apply the configuration below, then restart ClickHouse® and drop the `metric_log` table (so it will be recreated with the updated settings): + +```xml + + system + metric_log
+ + ENGINE = MergeTree + PARTITION BY (event_date) + ORDER BY (event_time) + TTL event_date + INTERVAL 14 DAY DELETE + SETTINGS min_bytes_for_wide_part = 536870912; + + 7500 +
+``` + +This configuration increases the threshold for wide parts to **512 MB**, preventing premature switching to the wide format and reducing memory usage during merges. + +The PR [#89811](https://github.com/ClickHouse/ClickHouse/pull/89811) introduces a similar improvement. + +--- + +## Global Fix (All Tables) + +In addition to `metric_log`, other tables may also be affected — particularly those with **average row sizes greater than ~80 bytes** and **hundreds of columns**. + +```xml + + + 0 + 131072 + + +``` + +These settings tell ClickHouse® to **keep using compact parts longer** +and to **enable the vertical merge algorithm** simultaneously with the switch to the wide format, preventing sudden spikes in memory usage. + +Caution: the vertical merge directly from compact parts to wide part can be VERY slow. + +--- + +### ⚠️ Potential Risks and Trade-offs + +Raising `min_bytes_for_wide_part` globally keeps more data in **compact parts**, which can both help and hurt depending on workload. Compact parts store all columns in a single `data.bin` file — this makes **inserts much faster**, especially for tables with **many columns**, since fewer files are created per part. It’s also a big advantage when storing data on **S3 or other object storage**, where every extra file adds latency and increases API call counts. + +The trade-off is that this layout makes **reads less efficient** for column-selective queries. Reading one or two columns from a large compact part means scanning and decompressing shared blocks instead of isolated files. It can also reduce cache locality, slightly worsen compression (different columns compressed together), and make **mutations or ALTERs** more expensive because each change rewrites the entire part. + +Lowering thresholds for vertical merges further decreases merge memory but may make the first merges slower, as they process columns sequentially. 
This configuration works best for **wide, append-only tables** or **S3-based storage**, while analytical tables with frequent updates or narrow schemas may perform better with defaults. If merge memory or S3 request overhead is your main concern, applying it globally is reasonable — otherwise, start with specific wide tables like `system.metric_log`, verify performance improvements, and expand gradually. + +Additionally, the vertical merge directly from compact parts to a wide part can be VERY slow. + +--- + +✅ **Summary** + +The root issue is a mismatch between byte-based and row-based thresholds for wide parts and vertical merges. +Aligning these values — by adjusting one or both parameters — stabilizes memory usage and prevents excessive RAM consumption during merges in `system.metric_log` and similar tables. diff --git a/content/en/altinity-kb-setup-and-maintenance/monitoring-operator-exporter-compatibility.md b/content/en/altinity-kb-setup-and-maintenance/monitoring-operator-exporter-compatibility.md new file mode 100644 index 0000000000..54d0549708 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/monitoring-operator-exporter-compatibility.md @@ -0,0 +1,150 @@ +--- +title: "Compatibility layer for the Altinity Kubernetes Operator for ClickHouse®" +linkTitle: "Compatibility layer for the Altinity Kubernetes Operator for ClickHouse®" +weight: 100 +description: >- + Page description for heading and indexes. +--- + +It's possible to expose `clickhouse-server` metrics in the style used by the Altinity Kubernetes Operator for ClickHouse®. It's for the `clickhouse-operator` grafana dashboard. 
+ +```sql +CREATE VIEW system.operator_compatible_metrics +( + `name` String, + `value` Float64, + `help` String, + `labels` Map(String, String), + `type` String +) AS +SELECT + concat('chi_clickhouse_event_', event) AS name, + CAST(value, 'Float64') AS value, + description AS help, + map('hostname', hostName()) AS labels, + 'counter' AS type +FROM system.events +UNION ALL +SELECT + concat('chi_clickhouse_metric_', metric) AS name, + CAST(value, 'Float64') AS value, + description AS help, + map('hostname', hostName()) AS labels, + 'gauge' AS type +FROM system.metrics +UNION ALL +SELECT + concat('chi_clickhouse_metric_', metric) AS name, + value, + '' AS help, + map('hostname', hostName()) AS labels, + 'gauge' AS type +FROM system.asynchronous_metrics +UNION ALL +SELECT + 'chi_clickhouse_metric_MemoryDictionaryBytesAllocated' AS name, + CAST(sum(bytes_allocated), 'Float64') AS value, + 'Memory size allocated for dictionaries' AS help, + map('hostname', hostName()) AS labels, + 'gauge' AS type +FROM system.dictionaries +UNION ALL +SELECT + 'chi_clickhouse_metric_LongestRunningQuery' AS name, + CAST(max(elapsed), 'Float64') AS value, + 'Longest running query time' AS help, + map('hostname', hostName()) AS labels, + 'gauge' AS type +FROM system.processes +UNION ALL +WITH + ['chi_clickhouse_table_partitions', 'chi_clickhouse_table_parts', 'chi_clickhouse_table_parts_bytes', 'chi_clickhouse_table_parts_bytes_uncompressed', 'chi_clickhouse_table_parts_rows', 'chi_clickhouse_metric_DiskDataBytes', 'chi_clickhouse_metric_MemoryPrimaryKeyBytesAllocated'] AS names, + [uniq(partition), count(), sum(bytes), sum(data_uncompressed_bytes), sum(rows), sum(bytes_on_disk), sum(primary_key_bytes_in_memory_allocated)] AS values, + arrayJoin(arrayZip(names, values)) AS tpl +SELECT + tpl.1 AS name, + CAST(tpl.2, 'Float64') AS value, + '' AS help, + map('database', database, 'table', table, 'active', toString(active), 'hostname', hostName()) AS labels, + 'gauge' AS type +FROM system.parts 
+GROUP BY + active, + database, + table +UNION ALL +WITH + ['chi_clickhouse_table_mutations', 'chi_clickhouse_table_mutations_parts_to_do'] AS names, + [CAST(count(), 'Float64'), CAST(sum(parts_to_do), 'Float64')] AS values, + arrayJoin(arrayZip(names, values)) AS tpl +SELECT + tpl.1 AS name, + tpl.2 AS value, + '' AS help, + map('database', database, 'table', table, 'hostname', hostName()) AS labels, + 'gauge' AS type +FROM system.mutations +WHERE is_done = 0 +GROUP BY + database, + table +UNION ALL +WITH if(coalesce(reason, 'unknown') = '', 'detached_by_user', coalesce(reason, 'unknown')) AS detach_reason +SELECT + 'chi_clickhouse_metric_DetachedParts' AS name, + CAST(count(), 'Float64') AS value, + '' AS help, + map('database', database, 'table', table, 'disk', disk, 'hostname', hostName()) AS labels, + 'gauge' AS type +FROM system.detached_parts +GROUP BY + database, + table, + disk, + reason +ORDER BY name ASC +``` + +```sh +nano /etc/clickhouse-server/config.d/operator_metrics.xml + + + + /metrics + POST,GET + + predefined_query_handler + SELECT * FROM system.operator_compatible_metrics FORMAT Prometheus + text/plain; charset=utf-8 + + + + + / + POST,GET + no-cache + + dynamic_query_handler + query + + + + +``` + +```sh +curl http://localhost:8123/metrics +# HELP chi_clickhouse_metric_Query Number of executing queries +# TYPE chi_clickhouse_metric_Query gauge +chi_clickhouse_metric_Query{hostname="LAPTOP"} 1 + +# HELP chi_clickhouse_metric_Merge Number of executing background merges +# TYPE chi_clickhouse_metric_Merge gauge +chi_clickhouse_metric_Merge{hostname="LAPTOP"} 0 + +# HELP chi_clickhouse_metric_PartMutation Number of mutations (ALTER DELETE/UPDATE) +# TYPE chi_clickhouse_metric_PartMutation gauge +chi_clickhouse_metric_PartMutation{hostname="LAPTOP"} 0 +``` + + diff --git a/content/en/altinity-kb-setup-and-maintenance/precreate_parts_using_clickhouse_local.sh.md 
b/content/en/altinity-kb-setup-and-maintenance/precreate_parts_using_clickhouse_local.sh.md index 2b18c87e70..efef872913 100644 --- a/content/en/altinity-kb-setup-and-maintenance/precreate_parts_using_clickhouse_local.sh.md +++ b/content/en/altinity-kb-setup-and-maintenance/precreate_parts_using_clickhouse_local.sh.md @@ -8,14 +8,9 @@ description: >- ## Precreate parts using clickhouse-local -``` -rm -rf /tmp/precreate_parts - - -mkdir -p /tmp/precreate_parts/metadata/local/ - -cd /tmp/precreate_parts +The code below was tested on 23.3 +``` ## 1. Imagine we want to process this file: cat < /tmp/data.csv @@ -25,85 +20,49 @@ cat < /tmp/data.csv 4,2020-01-02,"String for first partition" EOF +rm -rf /tmp/precreate_parts +mkdir -p /tmp/precreate_parts +cd /tmp/precreate_parts + ## 2. that is the metadata for the table we want to fill ## schema should match the schema of the table from server ## (the easiest way is just to copy it from the server) ## I've added sleepEachRow(0.5) here just to mimic slow insert -cat < metadata/local/test.sql -ATTACH TABLE local.test (id UInt64, d Date, s String, x MATERIALIZED sleepEachRow(0.5)) Engine=MergeTree ORDER BY id PARTITION BY toYYYYMM(d); -EOF +clickhouse-local --path=. --query="CREATE DATABASE local" +clickhouse-local --path=. --query="CREATE TABLE local.test (id UInt64, d Date, s String, x MATERIALIZED sleepEachRow(0.5)) Engine=MergeTree ORDER BY id PARTITION BY toYYYYMM(d);" -## 3a. that is the metadata for the input file we want to read -## it should match the structure of source file ## 3. we can insert the input file into that table in different manners: -## use stdin to read from pipe +## a) just plain insert +cat /tmp/data.csv | clickhouse-local --path=. --query="INSERT INTO local.test FORMAT CSV" -cat < metadata/local/stdin.sql -ATTACH TABLE local.stdin (id UInt64, d Date, s String) Engine=File(CSV, stdin); -EOF +## b) use File on the top of stdin (allows to tune the types) +clickhouse-local --path=. 
--query="CREATE TABLE local.stdin (id UInt64, d Date, s String) Engine=File(CSV, stdin)" +cat /tmp/data.csv | clickhouse-local --path=. --query="INSERT INTO local.test SELECT * FROM local.stdin" -## 3b. Instead of stdin you can use file path +## c) Instead of stdin you can use file engine +clickhouse-local --path=. --query "CREATE TABLE local.data_csv (id UInt64, d Date, s String) Engine=File(CSV, '/tmp/data.csv')" +clickhouse-local --path=. --query "INSERT INTO local.test SELECT * FROM local.data_csv" -cat < metadata/local/data_csv.sql -ATTACH TABLE local.data_csv (id UInt64, d Date, s String) Engine=File(CSV, '/tmp/data.csv'); -EOF +# 4. now we have already parts created +clickhouse-local --path=. --query "SELECT _part,* FROM local.test ORDER BY id" +ls -la data/local/test/ -## All preparations done, -## the rest is simple: +# if needed we can even preprocess them more agressively - by doing OPTIMIZE ON that +clickhouse-local --path=. --query "OPTIMIZE TABLE local.test FINAL" -# option a (if 3a used) with pipe / reading stdin +# that works, but clickhouse will keep inactive parts (those 'unmerged') in place. +ls -la data/local/test/ -cat /tmp/data.csv | clickhouse-local --query "INSERT INTO local.test SELECT * FROM local.stdin" -- --path=. +# we can use a bit hacky way to force it to remove inactive parts them +clickhouse-local --path=. --query "ALTER TABLE local.test MODIFY SETTING old_parts_lifetime=0, cleanup_delay_period=0, cleanup_delay_period_random_add=0" -# option b (if 3b used) 0 with filepath -cd /tmp/precreate_parts -clickhouse-local --query "INSERT INTO local.test SELECT * FROM local.data_csv" -- --path=. - - -# now you can check what was inserted (i did both options so i have doubled data) - -clickhouse-local --query "SELECT _part,* FROM local.test ORDER BY id" -- --path=. 
-202001_4_4_0 1 2020-01-01 String -202001_1_1_0 1 2020-01-01 String -202002_5_5_0 2 2020-02-02 Another string -202002_2_2_0 2 2020-02-02 Another string -202003_6_6_0 3 2020-03-03 One more string -202003_3_3_0 3 2020-03-03 One more string -202001_4_4_0 4 2020-01-02 String for first partition -202001_1_1_0 4 2020-01-02 String for first partition - -# But you can't do OPTIMIZE (local will die with coredump) :) That would be too good -# clickhouse-local --query "OPTIMIZE TABLE local.test FINAL" -- --path=. - -## now you can upload those parts to a server (in detached subfolder) and attach them. - -mfilimonov@laptop5591:/tmp/precreate_parts$ ls -la data/local/test/ -total 40 -drwxrwxr-x 9 mfilimonov mfilimonov 4096 paź 15 11:15 . -drwxrwxr-x 3 mfilimonov mfilimonov 4096 paź 15 11:15 .. -drwxrwxr-x 2 mfilimonov mfilimonov 4096 paź 15 11:15 202001_1_1_0 -drwxrwxr-x 2 mfilimonov mfilimonov 4096 paź 15 11:15 202001_4_4_0 -drwxrwxr-x 2 mfilimonov mfilimonov 4096 paź 15 11:15 202002_2_2_0 -drwxrwxr-x 2 mfilimonov mfilimonov 4096 paź 15 11:15 202002_5_5_0 -drwxrwxr-x 2 mfilimonov mfilimonov 4096 paź 15 11:15 202003_3_3_0 -drwxrwxr-x 2 mfilimonov mfilimonov 4096 paź 15 11:15 202003_6_6_0 -drwxrwxr-x 2 mfilimonov mfilimonov 4096 paź 15 11:15 detached --rw-rw-r-- 1 mfilimonov mfilimonov 1 paź 15 11:15 format_version.txt - - -mfilimonov@laptop5591:/tmp/precreate_parts$ ls -la data/local/test/202001_1_1_0/ -total 44 -drwxrwxr-x 2 mfilimonov mfilimonov 4096 paź 15 11:15 . -drwxrwxr-x 9 mfilimonov mfilimonov 4096 paź 15 11:15 .. 
--rw-rw-r-- 1 mfilimonov mfilimonov 250 paź 15 11:15 checksums.txt --rw-rw-r-- 1 mfilimonov mfilimonov 79 paź 15 11:15 columns.txt --rw-rw-r-- 1 mfilimonov mfilimonov 1 paź 15 11:15 count.txt --rw-rw-r-- 1 mfilimonov mfilimonov 155 paź 15 11:15 data.bin --rw-rw-r-- 1 mfilimonov mfilimonov 144 paź 15 11:15 data.mrk3 --rw-rw-r-- 1 mfilimonov mfilimonov 10 paź 15 11:15 default_compression_codec.txt --rw-rw-r-- 1 mfilimonov mfilimonov 4 paź 15 11:15 minmax_d.idx --rw-rw-r-- 1 mfilimonov mfilimonov 4 paź 15 11:15 partition.dat --rw-rw-r-- 1 mfilimonov mfilimonov 16 paź 15 11:15 primary.idx +## needed to give background threads time to clean inactive parts (max_block_size allows to stop that quickly if needed) +clickhouse-local --path=. --query "SELECT count() FROM numbers(100) WHERE sleepEachRow(0.1) SETTINGS max_block_size=1" + +ls -la data/local/test/ +clickhouse-local --path=. --query "SELECT _part,* FROM local.test ORDER BY id" ``` + diff --git a/content/en/altinity-kb-setup-and-maintenance/rbac.md b/content/en/altinity-kb-setup-and-maintenance/rbac.md index adcaacedfb..0bb9ff74c9 100644 --- a/content/en/altinity-kb-setup-and-maintenance/rbac.md +++ b/content/en/altinity-kb-setup-and-maintenance/rbac.md @@ -1,6 +1,6 @@ --- -title: "Access Control and Account Management (RBAC)" -linkTitle: "RBAC example" +title: "ClickHouse® Access Control and Account Management (RBAC)" +linkTitle: "ClickHouse® RBAC example" weight: 100 description: >- Access Control and Account Management (RBAC). @@ -8,11 +8,12 @@ description: >- Documentation https://clickhouse.com/docs/en/operations/access-rights/ -## Enable RBAC and create admin user +## Enable ClickHouse® RBAC and create admin user Create an ```admin``` user like (root in MySQL or postgres in PostgreSQL) to do the DBA/admin ops in the `user.xml` file and [set the access management property for the admin user](https://clickhouse.com/docs/en/operations/access-rights/#enabling-access-control) ```xml + .... 
@@ -45,6 +46,7 @@ Create an ```admin``` user like (root in MySQL or postgres in PostgreSQL) to do 1 ... + ``` ## default user @@ -52,6 +54,7 @@ Create an ```admin``` user like (root in MySQL or postgres in PostgreSQL) to do As `default` is used for many internal and background operations, so it is not convenient to set it up with a password, because you would have to change it in many configs/parts. Best way to secure the default user is only allow localhost or trusted network connections like this in `users.xml`: ```xml + ...... @@ -59,19 +62,15 @@ As `default` is used for many internal and background operations, so it is not c 127.0.0.1/8 10.10.10.0/24 + ...... + ``` ## replication user -The replication user is usually `default`. Ports 9009 and 9010(tls) provide low-level data access between servers.This ports should not be accessible from untrusted networks. You can specify credentials for authenthication between replicas. This is required when `interserver_https_port` is accessible from untrusted networks. You can do so creating a user with the `default` profile: - -```sql -CREATE USER replication IDENTIFIED WITH sha256_password BY 'password' SETTINGS PROFILE 'default' -``` - -After this assign this user to the interserver credentials: +The replication user is defined by `interserver_http_credential` tag. It does not relate to a ClickHouse client credentials configuration. **If this tag is ommited then authentication is not used during replication.** Ports 9009 and 9010(tls) provide low-level data access between servers. This ports should not be accessible from untrusted networks. You can specify credentials for authentication between replicas. This is required when `interserver_https_port` is accessible from untrusted networks. You can do so by defining user and password to the interserver credentials. 
Then replication protocol will use basic access authentication when connecting by HTTP/HTTPS to other replicas: ```xml @@ -80,14 +79,6 @@ After this assign this user to the interserver credentials: ``` -We also can use sha256 passwords like this: - -```xml -65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5 -``` - -When the `CREATE USER` query is executed in the `clickhouse-client` it will echo the `sha256` digest to copy it wherever you need - ## Create users and roles Now we can setup users/roles using a generic best-practice approach for RBAC from other databases, like using roles, granting permissions to roles, creating users for different applications, etc... diff --git a/content/en/altinity-kb-setup-and-maintenance/recovery-after-complete-data-loss.md b/content/en/altinity-kb-setup-and-maintenance/recovery-after-complete-data-loss.md index 66436275f0..5e26e145c9 100644 --- a/content/en/altinity-kb-setup-and-maintenance/recovery-after-complete-data-loss.md +++ b/content/en/altinity-kb-setup-and-maintenance/recovery-after-complete-data-loss.md @@ -1,12 +1,12 @@ --- -title: "recovery-after-complete-data-loss" -linkTitle: "recovery-after-complete-data-loss" +title: "Recovery after complete data loss" +linkTitle: "Recovery after complete data loss" weight: 100 description: >- - Recovery after complete data loss + When disaster strikes --- -# Atomic & Ordinary databases. +## Atomic & Ordinary databases. 
srv1 -- good replica @@ -59,7 +59,7 @@ SELECT concat('CREATE DATABASE "', name, '" ENGINE = ', engine, ' COMMENT \'', c FROM system.databases WHERE name NOT IN ('INFORMATION_SCHEMA', 'information_schema', 'system', 'default'); -clickhouse-client < /home/denis.zhuravlev/generate_schema.sql > create_database.sql +clickhouse-client < /home/ubuntu/generate_schema.sql > create_database.sql ``` check the result diff --git a/content/en/altinity-kb-setup-and-maintenance/schema-migration-tools/_index.md b/content/en/altinity-kb-setup-and-maintenance/schema-migration-tools/_index.md index e10628b047..7fafdbb8b6 100644 --- a/content/en/altinity-kb-setup-and-maintenance/schema-migration-tools/_index.md +++ b/content/en/altinity-kb-setup-and-maintenance/schema-migration-tools/_index.md @@ -1,29 +1,50 @@ --- -title: "Schema migration tools for ClickHouse" -linkTitle: "Schema migration tools for ClickHouse" +title: "Schema migration tools for ClickHouse®" +linkTitle: "Schema migration tools for ClickHouse®" description: > - Schema migration tools for ClickHouse + Schema migration tools for ClickHouse® --- -* golang-migrate tool - see [golang-migrate](golang-migrate) -* bytebase - * [https://bytebase.com](https://bytebase.com) -* Flyway - there are a lot of PRs introducing ClickHouse support, maintainer doesn't merge them (maybe he will change his mind soon), but's it's not hard to build flyway from one of those PRs (latest at the top) - * [https://github.com/flyway/flyway/pull/3333](https://github.com/flyway/flyway/pull/3333) Сlickhouse support - * [https://github.com/flyway/flyway/pull/3134](https://github.com/flyway/flyway/pull/3134) Сlickhouse support - * [https://github.com/flyway/flyway/pull/3133](https://github.com/flyway/flyway/pull/3133) Add support clickhouse - * [https://github.com/flyway/flyway/pull/2981](https://github.com/flyway/flyway/pull/2981) Clickhouse replicated - * [https://github.com/flyway/flyway/pull/2640](https://github.com/flyway/flyway/pull/2640) Yet 
another ClickHouse support - * [https://github.com/flyway/flyway/pull/2166](https://github.com/flyway/flyway/pull/2166) Clickhouse support (\#1772) - * [https://github.com/flyway/flyway/pull/1773](https://github.com/flyway/flyway/pull/1773) Fixed \#1772: Add support for ClickHouse ([https://clickhouse.yandex/](https://clickhouse.yandex/)) +* [atlas](https://atlasgo.io) + * [https://atlasgo.io/guides/clickhouse](https://atlasgo.io/guides/clickhouse) +* golang-migrate tool - see [golang-migrate](./golang-migrate) * liquibase * [https://github.com/mediarithmics/liquibase-clickhouse](https://github.com/mediarithmics/liquibase-clickhouse) * [https://johntipper.org/how-to-execute-liquibase-changesets-against-clickhouse/](https://johntipper.org/how-to-execute-liquibase-changesets-against-clickhouse/) -* custom tool for ClickHouse +* HousePlant + * New CLI migration tool (Dec2024) for ClickHouse developed by [June](https://june.so) + * Documentation [https://houseplant.readthedocs.io/en/latest/index.html](https://houseplant.readthedocs.io/en/latest/index.html) + * Github [https://github.com/juneHQ/houseplant](https://github.com/juneHQ/houseplant) +* ClickSuite + * developed by [GameBeast](https://www.gamebeast.gg/) + * A robust CLI tool for managing ClickHouse database migrations with environment-specific configurations and TypeScript support. 
+ * Github [https://github.com/GamebeastGG/clicksuite](https://github.com/GamebeastGG/clicksuite) +* Flyway + * [Official community supported plugin](https://documentation.red-gate.com/fd/clickhouse-database-277579307.html) [git](https://github.com/flyway/flyway-community-db-support/tree/main/flyway-database-clickhouse) https://github.com/flyway/flyway-community-db-support + * Old pull requests (latest at the top): + * [https://github.com/flyway/flyway/pull/3333](https://github.com/flyway/flyway/pull/3333) ClickHouse support + * [https://github.com/flyway/flyway/pull/3134](https://github.com/flyway/flyway/pull/3134) ClickHouse support + * [https://github.com/flyway/flyway/pull/3133](https://github.com/flyway/flyway/pull/3133) Add support ClickHouse + * [https://github.com/flyway/flyway/pull/2981](https://github.com/flyway/flyway/pull/2981) ClickHouse replicated + * [https://github.com/flyway/flyway/pull/2640](https://github.com/flyway/flyway/pull/2640) Yet another ClickHouse support + * [https://github.com/flyway/flyway/pull/2166](https://github.com/flyway/flyway/pull/2166) ClickHouse support (\#1772) + * [https://github.com/flyway/flyway/pull/1773](https://github.com/flyway/flyway/pull/1773) Fixed \#1772: Add support for ClickHouse ([https://clickhouse.yandex/](https://clickhouse.yandex/)) +* [alembic](https://alembic.sqlalchemy.org/en/latest/) + * see https://clickhouse-sqlalchemy.readthedocs.io/en/latest/migrations.html +* bytebase + * [https://bytebase.com](https://bytebase.com) +* custom tools for ClickHouse in Python + * [https://github.com/delium/clickhouse-migrator](https://github.com/delium/clickhouse-migrator) + * [https://github.com/zifter/clickhouse-migrations](https://github.com/zifter/clickhouse-migrations) + * [https://github.com/trushad0w/clickhouse-migrate](https://github.com/trushad0w/clickhouse-migrate) +* phpMigrations + * 
[https://github.com/smi2/phpMigrationsClickhouse](https://github.com/smi2/phpMigrationsClickhouse) * [https://habrahabr.ru/company/smi2/blog/317682/](https://habrahabr.ru/company/smi2/blog/317682/) -* dbmate +* dbmate * [https://github.com/amacneil/dbmate#clickhouse](https://github.com/amacneil/dbmate#clickhouse) -know more? +Know more? + +https://clickhouse.com/docs/knowledgebase/schema_migration_tools + +Article on migrations in ClickHouse +https://posthog.com/blog/async-migrations diff --git a/content/en/altinity-kb-setup-and-maintenance/schema-migration-tools/golang-migrate.md b/content/en/altinity-kb-setup-and-maintenance/schema-migration-tools/golang-migrate.md index 0f6e6028dc..0ad71eab16 100644 --- a/content/en/altinity-kb-setup-and-maintenance/schema-migration-tools/golang-migrate.md +++ b/content/en/altinity-kb-setup-and-maintenance/schema-migration-tools/golang-migrate.md @@ -8,7 +8,7 @@ description: > `migrate` is a simple schema migration tool written in golang. No external dependencies are required (like interpreter, jre), only one platform-specific executable. [golang-migrate/migrate](https://github.com/golang-migrate/migrate) -`migrate` supports several databases, including ClickHouse (support was introduced by [@kshvakov](https://github.com/kshvakov)). +`migrate` supports several databases, including ClickHouse® (support was introduced by [@kshvakov](https://github.com/kshvakov)). To store information about migrations state `migrate` creates one additional table in target database, by default that table is called `schema_migrations`. 
@@ -53,7 +53,9 @@ migrate -database 'clickhouse://localhost:9000' -path ./migrations down | URL Query | Description | | :--- | :--- | -| `x-migrations-table` | Name of the migrations table | +| `x-migrations-table`| Name of the migrations table | +| `x-migrations-table-engine`| Engine to use for the migrations table, defaults to TinyLog | +| `x-cluster-name` | Name of cluster for creating table cluster wide | | `database` | The name of the database to connect to | | `username` | The user to sign in as | | `password` | The user's password | @@ -63,23 +65,9 @@ migrate -database 'clickhouse://localhost:9000' -path ./migrations down #### Replicated / Distributed / Cluster environments -By default `migrate` create table `schema_migrations` with the following structure +`golang-migrate` supports a clustered ClickHouse environment since v4.15.0. -```sql -CREATE TABLE schema_migrations ( - version UInt32, - dirty UInt8, - sequence UInt64 -) ENGINE = TinyLog -``` - -That allows storing version of schema locally. - -If you need to use `migrate` in some multi server environment (replicated / cluster) you should create `schema_migrations` manually with the same structure and with the appropriate Engine (Replicated / Distributed), otherwise, other servers will not know the version of the DB schema. As an alternative you can force the current version number on another server manually, like that: - -```bash -migrate -database 'clickhouse://localhost:9000' -path ./migrations force 123456 # force version 123456 -``` +If you provide `x-cluster-name` query param, it will create the table to store migration data on the passed cluster. 
#### Known issues diff --git a/content/en/altinity-kb-setup-and-maintenance/source-pars-size-is-greater-than-maximum.md b/content/en/altinity-kb-setup-and-maintenance/source-pars-size-is-greater-than-maximum.md index 8fefa762f9..5001273d8f 100644 --- a/content/en/altinity-kb-setup-and-maintenance/source-pars-size-is-greater-than-maximum.md +++ b/content/en/altinity-kb-setup-and-maintenance/source-pars-size-is-greater-than-maximum.md @@ -21,8 +21,8 @@ SELECT * FROM system.merges ``` That logic is needed to prevent picking a log of huge merges simultaneously -(otherwise they will take all available slots and clickhouse will not be -able to do smaller merges, which usally are important for keeping the +(otherwise they will take all available slots and ClickHouse® will not be +able to do smaller merges, which usually are important for keeping the number of parts stable). @@ -35,8 +35,8 @@ system.replication_queue tables, it should be resolved by it's own. If it happens often or don't resolves by it's own during some longer period of time, it could be caused by: 1) increased insert pressure -2) disk issues / high load (it works slow, not enought space etc.) +2) disk issues / high load (it works slow, not enough space etc.) 3) high CPU load (not enough CPU power to catch up with merges) 4) issue with table schemas leading to high merges pressure (high / increased number of tables / partitions / etc.) -Start from checking dmesg / system journals / clickhouse monitoring to find the anomalies. +Start from checking dmesg / system journals / ClickHouse monitoring to find the anomalies. 
diff --git a/content/en/altinity-kb-setup-and-maintenance/suspiciously-many-broken-parts.md b/content/en/altinity-kb-setup-and-maintenance/suspiciously-many-broken-parts.md index 9cd69fde57..e2b3ff345c 100644 --- a/content/en/altinity-kb-setup-and-maintenance/suspiciously-many-broken-parts.md +++ b/content/en/altinity-kb-setup-and-maintenance/suspiciously-many-broken-parts.md @@ -2,12 +2,15 @@ title: "Suspiciously many broken parts" linkTitle: "Suspiciously many broken parts" description: > - Suspiciously many broken parts error during the server startup. + Debugging a common error message +keywords: + - clickhouse broken parts + - clickhouse too many broken parts --- ## Symptom: -clickhouse don't start with a message `DB::Exception: Suspiciously many broken parts to remove.` +clickhouse fails to start with a message `DB::Exception: Suspiciously many broken parts to remove.` ## Cause: @@ -25,32 +28,32 @@ Why data could be corrupted? ## Action: -1. If you ok to accept the data loss: set up `force_restore_data` flag and clickhouse will move the parts to detached. Data loss is possible if the issue is a result of misconfiguration (i.e. someone accidentally has fixed xml configs with incorrect shard/replica macros, data will be moved to detached folder and can be recovered). +1. If you are ok to accept the [data loss](/altinity-kb-setup-and-maintenance/recovery-after-complete-data-loss/): set up `force_restore_data` flag and clickhouse will move the parts to detached. Data loss is possible if the issue is a result of misconfiguration (i.e. someone accidentally has fixed xml configs with incorrect [shard/replica macros](https://altinity.com/webinarspage/deep-dive-on-clickhouse-sharding-and-replication), data will be moved to detached folder and can be recovered). 
```bash sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data ``` - then restart clickhouse, the table will be attached, and the broken parts will be detached, which means the data from those parts will not be available for the selects. You can see the list of those parts in the `system.detached_parts` table and drop them if needed using `ALTER TABLE ... DROP DETACHED PART ...` commands. + then restart clickhouse. the table will be attached, and the broken parts will be detached, which means the data from those parts will not be available for the selects. You can see the list of those parts in the `system.detached_parts` table and drop them if needed using `ALTER TABLE ... DROP DETACHED PART ...` commands. If you are ok to tolerate bigger losses automatically you can change that safeguard configuration to be less sensitive by increasing `max_suspicious_broken_parts` setting: ``` cat /etc/clickhouse-server/config.d/max_suspicious_broken_parts.xml - + 50 - + ``` - this limit is set to 10 by default, we can set a bigger value (50 or 100 or more), but the data will lose because of the corruption. + this limit is set to 100 by default in recent releases. We can set a bigger value (250 or more), but the data will be lost because of the corruption. - Check also a similar setting `max_suspicious_broken_parts_bytes`. + Check out also a similar setting `max_suspicious_broken_parts_bytes`. See https://clickhouse.com/docs/en/operations/settings/merge-tree-settings/ 2. If you can't accept the data loss - you should recover data from backups / re-insert it once again etc. - If you don't want to tolerate automatic detaching of broken parts, you can set `max_suspicious_broken_parts_bytes` and `max_suspicious_broken_parts` to 0. + If you don't want to tolerate automatic detaching of broken parts, you can set `max_suspicious_broken_parts_bytes` and `max_suspicious_broken_parts` to 0. 
## Scenario illustrating / testing @@ -79,9 +82,9 @@ attach table t111; Received exception from server (version 21.12.3): Code: 231. DB::Exception: Received from localhost:9000. DB::Exception: Suspiciously many (2) broken parts to remove.. (TOO_MANY_UNEXPEC ``` -4. setup force_restrore_data flag +4. setup force_restore_data flag ``` sudo -u clickhouse touch /var/lib/clickhouse/flags/force_restore_data sudo service clickhouse-server restart ``` -then the table `t111` will be attached lost the corrupted data. +then the table `t111` will be attached, losing the corrupted data. diff --git a/content/en/altinity-kb-setup-and-maintenance/sysall.md b/content/en/altinity-kb-setup-and-maintenance/sysall.md index 65385d52af..f93b4073d8 100644 --- a/content/en/altinity-kb-setup-and-maintenance/sysall.md +++ b/content/en/altinity-kb-setup-and-maintenance/sysall.md @@ -10,7 +10,7 @@ description: >- The idea is that you have a macros `cluster` with cluster name. -For example you have a cluster named `production` and this cluster includes all ClickHouse nodes. +For example you have a cluster named `production` and this cluster includes all ClickHouse® nodes. ```xml $ cat /etc/clickhouse-server/config.d/clusters.xml @@ -53,7 +53,12 @@ SETTINGS skip_unavailable_shards = 1 `skip_unavailable_shards` is necessary to query a system with some nodes are down. 
-## Script to create DB ojects +## Script to create DB objects + +```bash +clickhouse-client -q 'show tables from system'> list +for i in `cat list`; do echo "CREATE OR REPLACE VIEW sysall."$i" as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system."$i") SETTINGS skip_unavailable_shards = 1;"; done; +``` ```sql CREATE DATABASE sysall; @@ -72,7 +77,7 @@ FROM system.clusters LEFT JOIN ( SELECT - hostName() AS host_name, + replaceRegexpOne(hostName(),'-(\d+)-0$','-\1') AS host_name, -- remove trailing 0 FQDN() AS fqdn, materialize(uptime()) AS uptime FROM clusterAllReplicas('{cluster}', system.one) @@ -80,74 +85,64 @@ LEFT JOIN WHERE cluster = getMacro('cluster') SETTINGS skip_unavailable_shards = 1; -CREATE OR REPLACE VIEW sysall.asynchronous_metrics as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.asynchronous_metrics) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.query_log as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.query_log) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.dictionaries as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.dictionaries) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.replication_queue as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.replication_queue) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.replicas as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.replicas) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.merges as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.merges) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.mutations as select hostName() nodeHost, FQDN() nodeFQDN, * 
-from clusterAllReplicas('{cluster}', system.mutations) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.parts as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.parts) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.detached_parts as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.detached_parts) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.disks as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.disks) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.distribution_queue as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.distribution_queue) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.databases as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.databases) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.events as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.events) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.metrics as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.metrics) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.macros as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.macros) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.tables as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.tables) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.clusters as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.clusters) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.columns as 
select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.columns) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.processes as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.processes) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.errors as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.errors) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.settings as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.settings) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.parts_columns as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.parts_columns) SETTINGS skip_unavailable_shards = 1; - -CREATE OR REPLACE VIEW sysall.zookeeper as select hostName() nodeHost, FQDN() nodeFQDN, * -from clusterAllReplicas('{cluster}', system.zookeeper) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.asynchronous_inserts as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.asynchronous_inserts) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.asynchronous_metrics as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.asynchronous_metrics) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.backups as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.backups) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.clusters as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.clusters) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.columns as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.columns) SETTINGS 
skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.current_roles as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.current_roles) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.data_skipping_indices as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.data_skipping_indices) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.databases as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.databases) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.detached_parts as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.detached_parts) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.dictionaries as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.dictionaries) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.disks as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.disks) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.distributed_ddl_queue as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.distributed_ddl_queue) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.distribution_queue as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.distribution_queue) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.dropped_tables as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.dropped_tables) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.enabled_roles as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.enabled_roles) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW 
sysall.errors as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.errors) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.events as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.events) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.filesystem_cache as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.filesystem_cache) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.grants as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.grants) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.jemalloc_bins as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.jemalloc_bins) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.macros as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.macros) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.merge_tree_settings as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.merge_tree_settings) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.merges as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.merges) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.metrics as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.metrics) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.moves as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.moves) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.mutations as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.mutations) SETTINGS skip_unavailable_shards = 1; 
+CREATE OR REPLACE VIEW sysall.named_collections as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.named_collections) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.parts as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.parts) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.parts_columns as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.parts_columns) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.privileges as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.privileges) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.processes as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.processes) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.projection_parts as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.projection_parts) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.projection_parts_columns as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.projection_parts_columns) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.query_cache as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.query_cache) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.query_log as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.query_log) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.quota_limits as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.quota_limits) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.quota_usage as select hostName() nodeHost, FQDN() 
nodeFQDN, * from clusterAllReplicas('{cluster}', system.quota_usage) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.quotas as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.quotas) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.quotas_usage as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.quotas_usage) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.replicas as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.replicas) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.replicated_fetches as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.replicated_fetches) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.replicated_merge_tree_settings as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.replicated_merge_tree_settings) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.replication_queue as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.replication_queue) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.role_grants as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.role_grants) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.roles as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.roles) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.row_policies as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.row_policies) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.server_settings as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.server_settings) 
SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.settings as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.settings) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.settings_profile_elements as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.settings_profile_elements) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.settings_profiles as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.settings_profiles) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.storage_policies as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.storage_policies) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.tables as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.tables) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.user_directories as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.user_directories) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.user_processes as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.user_processes) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.users as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.users) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.warnings as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.warnings) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.zookeeper as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.zookeeper) SETTINGS skip_unavailable_shards = 1; +CREATE OR REPLACE VIEW sysall.zookeeper_connection 
as select hostName() nodeHost, FQDN() nodeFQDN, * from clusterAllReplicas('{cluster}', system.zookeeper_connection) SETTINGS skip_unavailable_shards = 1; ``` ## Some examples diff --git a/content/en/altinity-kb-setup-and-maintenance/timeouts-during-optimize-final.md b/content/en/altinity-kb-setup-and-maintenance/timeouts-during-optimize-final.md index 61febef29a..a422ca28b7 100644 --- a/content/en/altinity-kb-setup-and-maintenance/timeouts-during-optimize-final.md +++ b/content/en/altinity-kb-setup-and-maintenance/timeouts-during-optimize-final.md @@ -10,7 +10,7 @@ description: >- Timeout may occur 1) due to the fact that the client reach timeout interval. - - in case of TCP / native clients - you can change send_timeout / recieve_timeout + tcp_keep_alive_timeout + driver timeout settings + - in case of TCP / native clients - you can change send_timeout / receive_timeout + tcp_keep_alive_timeout + driver timeout settings - in case of HTTP clients - you can change http_send_timeout / http_receive_timeout + tcp_keep_alive_timeout + driver timeout settings 2) (in the case of ON CLUSTER queries) due to the fact that the timeout for query execution by shards ends diff --git a/content/en/altinity-kb-setup-and-maintenance/uniq-uuid-doubled-clickhouse-upgrade.md b/content/en/altinity-kb-setup-and-maintenance/uniq-uuid-doubled-clickhouse-upgrade.md index 9ed44ab259..9c3391da36 100644 --- a/content/en/altinity-kb-setup-and-maintenance/uniq-uuid-doubled-clickhouse-upgrade.md +++ b/content/en/altinity-kb-setup-and-maintenance/uniq-uuid-doubled-clickhouse-upgrade.md @@ -1,14 +1,12 @@ --- -title: "AggregateFunction(uniq, UUID) doubled after ClickHouse upgrade" -linkTitle: "AggregateFunction(uniq, UUID) doubled after ClickHouse upgrade" +title: "AggregateFunction(uniq, UUID) doubled after ClickHouse® upgrade" +linkTitle: "AggregateFunction(uniq, UUID) doubled after ClickHouse® upgrade" weight: 100 -description: >- - Page description for heading and indexes. 
--- ## What happened -After ClickHouse upgrade from version pre 21.6 to version after 21.6, count of unique UUID in AggregatingMergeTree tables nearly doubled in case of merging of data which was generated in different ClickHouse versions. +After ClickHouse® upgrade from version pre 21.6 to version after 21.6, count of unique UUID in AggregatingMergeTree tables nearly doubled in case of merging of data which was generated in different ClickHouse versions. ## Why happened diff --git a/content/en/altinity-kb-setup-and-maintenance/uniqExact-to-uniq-combined.md b/content/en/altinity-kb-setup-and-maintenance/uniqExact-to-uniq-combined.md new file mode 100644 index 0000000000..f2aa6f2065 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/uniqExact-to-uniq-combined.md @@ -0,0 +1,216 @@ +--- +title: "How to convert uniqExact states to approximate uniq functions states" +linkTitle: "Convert uniqExact to uniq(Combined)" +weight: 100 +description: >- + A way to convert to uniqExactState to other uniqStates (like uniqCombinedState) in ClickHouse® +--- + +## uniqExactState + +`uniqExactState` is stored in two parts: a count of values in `LEB128` format + list values without a delimiter. + +In our case, the value is `sipHash128` of strings passed to uniqExact function. + +```text +┌─hex(uniqExactState(toString(arrayJoin([1]))))─┐ +│ 01E2756D8F7A583CA23016E03447724DE7 │ +└───────────────────────────────────────────────┘ + 01 E2756D8F7A583CA23016E03447724DE7 + ^ ^ + LEB128 sipHash128 + + +┌─hex(uniqExactState(toString(arrayJoin([1, 2]))))───────────────────┐ +│ 024809CB4528E00621CF626BE9FA14E2BFE2756D8F7A583CA23016E03447724DE7 │ +└────────────────────────────────────────────────────────────────────┘ + 02 4809CB4528E00621CF626BE9FA14E2BF E2756D8F7A583CA23016E03447724DE7 + ^ ^ ^ +LEB128 sipHash128 sipHash128 +``` + +So, our task is to find how we can generate such values by ourself. +In case of `String` data type, it just the simple `sipHash128` function. 
+ +```text +┌─hex(sipHash128(toString(2)))─────┬─hex(sipHash128(toString(1)))─────┐ +│ 4809CB4528E00621CF626BE9FA14E2BF │ E2756D8F7A583CA23016E03447724DE7 │ +└──────────────────────────────────┴──────────────────────────────────┘ +``` + +The second task: it needs to read a state and split it into an array of values. +Luckily for us, ClickHouse® uses the exact same serialization (`LEB128` + list of values) for Arrays (in this case if `uniqExactState` and `Array` are serialized into `RowBinary` format). + +We need a helper -- `UDF` function to do that conversion: + +```xml +cat /etc/clickhouse-server/pipe_function.xml + + + executable + 0 + pipe + Array(FixedString(16)) + + String + + RowBinary + cat + 0 + + +``` +This UDF -- `pipe` converts `uniqExactState` to the `Array(FixedString(16))`. + +```text +┌─arrayMap(x -> hex(x), pipe(uniqExactState(toString(arrayJoin([1, 2])))))──────────────┐ +│ ['4809CB4528E00621CF626BE9FA14E2BF','E2756D8F7A583CA23016E03447724DE7'] │ +└───────────────────────────────────────────────────────────────────────────────────────┘ +``` + +And here is the full example, how you can convert `uniqExactState(string)` to `uniqState(string)` or `uniqCombinedState(string)` using `pipe` UDF and `arrayReduce('func', [..])`. + +```sql +-- Generate demo with random data, uniqs are stored as heavy uniqExact +CREATE TABLE aggregates +( + `id` UInt32, + `uniqExact` AggregateFunction(uniqExact, String) +) +ENGINE = AggregatingMergeTree +ORDER BY id as +SELECT + number % 10000 AS id, + uniqExactState(toString(number)) +FROM numbers(10000000) +GROUP BY id; + +0 rows in set. Elapsed: 2.042 sec. Processed 10.01 million rows, 80.06 MB (4.90 million rows/s., 39.21 MB/s.) 
+ +-- Let's add a new columns to store optimized, approximate uniq & uniqCombined +ALTER TABLE aggregates + ADD COLUMN `uniq` AggregateFunction(uniq, FixedString(16)) + default arrayReduce('uniqState', pipe(uniqExact)), + ADD COLUMN `uniqCombined` AggregateFunction(uniqCombined, FixedString(16)) + default arrayReduce('uniqCombinedState', pipe(uniqExact)); + +-- Materialize defaults in the new columns +ALTER TABLE aggregates UPDATE uniqCombined = uniqCombined, uniq = uniq +WHERE 1 settings mutations_sync=2; + +-- Let's reset defaults to remove the dependancy of the UDF from our table +ALTER TABLE aggregates + modify COLUMN `uniq` remove default, + modify COLUMN `uniqCombined` remove default; + +-- Alternatively you can populate data in the new columns directly without using DEFAULT columns +-- ALTER TABLE aggregates UPDATE +-- uniqCombined = arrayReduce('uniqCombinedState', pipe(uniqExact)), +-- uniq = arrayReduce('uniqState', pipe(uniqExact)) +-- WHERE 1 settings mutations_sync=2; + +-- Check results, results are slighty different, because uniq & uniqCombined are approximate functions +SELECT + id % 20 AS key, + uniqExactMerge(uniqExact), + uniqCombinedMerge(uniqCombined), + uniqMerge(uniq) +FROM aggregates +GROUP BY key + +┌─key─┬─uniqExactMerge(uniqExact)─┬─uniqCombinedMerge(uniqCombined)─┬─uniqMerge(uniq)─┐ +│ 0 │ 500000 │ 500195 │ 500455 │ +│ 1 │ 500000 │ 502599 │ 501549 │ +│ 2 │ 500000 │ 498058 │ 504428 │ +│ 3 │ 500000 │ 499748 │ 500195 │ +│ 4 │ 500000 │ 500791 │ 500836 │ +│ 5 │ 500000 │ 502430 │ 497558 │ +│ 6 │ 500000 │ 500262 │ 501785 │ +│ 7 │ 500000 │ 501514 │ 495758 │ +│ 8 │ 500000 │ 500121 │ 498597 │ +│ 9 │ 500000 │ 502173 │ 500455 │ +│ 10 │ 500000 │ 499144 │ 498386 │ +│ 11 │ 500000 │ 500525 │ 503139 │ +│ 12 │ 500000 │ 503624 │ 497103 │ +│ 13 │ 500000 │ 499986 │ 497992 │ +│ 14 │ 500000 │ 502027 │ 494833 │ +│ 15 │ 500000 │ 498831 │ 500983 │ +│ 16 │ 500000 │ 501103 │ 500836 │ +│ 17 │ 500000 │ 499409 │ 496791 │ +│ 18 │ 500000 │ 501641 │ 502991 │ +│ 19 │ 
500000 │ 500648 │ 500881 │ +└─────┴───────────────────────────┴─────────────────────────────────┴─────────────────┘ + +20 rows in set. Elapsed: 2.312 sec. Processed 10.00 thousand rows, 7.61 MB (4.33 thousand rows/s., 3.29 MB/s.) +``` + + +Now, lets repeat the same insert, but in that case we will also populate `uniq` & `uniqCombined` with values converted via `sipHash128` function. +If we did everything right, `uniq` counts will not change, because we inserted the exact same values. + +```sql +INSERT INTO aggregates SELECT + number % 10000 AS id, + uniqExactState(toString(number)), + uniqState(sipHash128(toString(number))), + uniqCombinedState(sipHash128(toString(number))) +FROM numbers(10000000) +GROUP BY id; + +0 rows in set. Elapsed: 5.386 sec. Processed 10.01 million rows, 80.06 MB (1.86 million rows/s., 14.86 MB/s.) + + +SELECT + id % 20 AS key, + uniqExactMerge(uniqExact), + uniqCombinedMerge(uniqCombined), + uniqMerge(uniq) +FROM aggregates +GROUP BY key + +┌─key─┬─uniqExactMerge(uniqExact)─┬─uniqCombinedMerge(uniqCombined)─┬─uniqMerge(uniq)─┐ +│ 0 │ 500000 │ 500195 │ 500455 │ +│ 1 │ 500000 │ 502599 │ 501549 │ +│ 2 │ 500000 │ 498058 │ 504428 │ +│ 3 │ 500000 │ 499748 │ 500195 │ +│ 4 │ 500000 │ 500791 │ 500836 │ +│ 5 │ 500000 │ 502430 │ 497558 │ +│ 6 │ 500000 │ 500262 │ 501785 │ +│ 7 │ 500000 │ 501514 │ 495758 │ +│ 8 │ 500000 │ 500121 │ 498597 │ +│ 9 │ 500000 │ 502173 │ 500455 │ +│ 10 │ 500000 │ 499144 │ 498386 │ +│ 11 │ 500000 │ 500525 │ 503139 │ +│ 12 │ 500000 │ 503624 │ 497103 │ +│ 13 │ 500000 │ 499986 │ 497992 │ +│ 14 │ 500000 │ 502027 │ 494833 │ +│ 15 │ 500000 │ 498831 │ 500983 │ +│ 16 │ 500000 │ 501103 │ 500836 │ +│ 17 │ 500000 │ 499409 │ 496791 │ +│ 18 │ 500000 │ 501641 │ 502991 │ +│ 19 │ 500000 │ 500648 │ 500881 │ +└─────┴───────────────────────────┴─────────────────────────────────┴─────────────────┘ + +20 rows in set. Elapsed: 3.318 sec. Processed 20.00 thousand rows, 11.02 MB (6.03 thousand rows/s., 3.32 MB/s.) 
+``` + +Let's compare the data size, `uniq` won in this case, but check this article [Functions to count uniqs](../../altinity-kb-schema-design/uniq-functions/), mileage may vary. + +```sql +optimize table aggregates final; + +SELECT + column, + formatReadableSize(sum(column_data_compressed_bytes) AS size) AS compressed, + formatReadableSize(sum(column_data_uncompressed_bytes) AS usize) AS uncompressed +FROM system.parts_columns +WHERE (active = 1) AND (table LIKE 'aggregates') and column like '%uniq%' +GROUP BY column +ORDER BY size DESC; + +┌─column───────┬─compressed─┬─uncompressed─┐ +│ uniqExact │ 153.21 MiB │ 152.61 MiB │ +│ uniqCombined │ 76.62 MiB │ 76.32 MiB │ +│ uniq │ 38.33 MiB │ 38.18 MiB │ +└──────────────┴────────────┴──────────────┘ +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/useful-setting-to-turn-on.md b/content/en/altinity-kb-setup-and-maintenance/useful-setting-to-turn-on.md index df95d394ef..b4eeba1655 100644 --- a/content/en/altinity-kb-setup-and-maintenance/useful-setting-to-turn-on.md +++ b/content/en/altinity-kb-setup-and-maintenance/useful-setting-to-turn-on.md @@ -10,11 +10,11 @@ description: >- Some setting that are not enabled by default. -* [ttl_only_drop_parts](https://clickhouse.com/docs/en/operations/settings/settings/#ttl_only_drop_parts) +* [ttl_only_drop_parts](https://clickhouse.com/docs/operations/settings/merge-tree-settings#ttl_only_drop_parts) Enables or disables complete dropping of data parts where all rows are expired in MergeTree tables. -When ttl_only_drop_parts is disabled (by default), the ClickHouse server only deletes expired rows according to their TTL. +When ttl_only_drop_parts is disabled (by default), the ClickHouse® server only deletes expired rows according to their TTL. When ttl_only_drop_parts is enabled, the ClickHouse server drops a whole part when all rows in it are expired. 
@@ -33,7 +33,7 @@ Possible values: * [aggregate_functions_null_for_empty](https://clickhouse.com/docs/en/operations/settings/settings/#aggregate_functions_null_for_empty) -Default behaviour is not compatible with ANSI SQL (ClickHouse avoids Nullable types by perfomance reasons) +Default behaviour is not compatible with ANSI SQL (ClickHouse avoids Nullable types by performance reasons) ```sql select sum(x), avg(x) from (select 1 x where 0); diff --git a/content/en/altinity-kb-setup-and-maintenance/who-ate-my-cpu.md b/content/en/altinity-kb-setup-and-maintenance/who-ate-my-cpu.md new file mode 100644 index 0000000000..1aa45ad825 --- /dev/null +++ b/content/en/altinity-kb-setup-and-maintenance/who-ate-my-cpu.md @@ -0,0 +1,66 @@ +--- +title: "Who ate my CPU" +linkTitle: "Who ate my CPU" +weight: 100 +description: >- + Queries to find which subsytem of ClickHouse® is using the most of CPU. +--- + +## Merges + +```sql +SELECT + table, + round((elapsed * (1 / progress)) - elapsed, 2) AS estimate, + elapsed, + progress, + is_mutation, + formatReadableSize(total_size_bytes_compressed) AS size, + formatReadableSize(memory_usage) AS mem +FROM system.merges +ORDER BY elapsed DESC +``` + +## Mutations + +```sql +SELECT + database, + table, + substr(command, 1, 30) AS command, + sum(parts_to_do) AS parts_to_do, + anyIf(latest_fail_reason, latest_fail_reason != '') +FROM system.mutations +WHERE NOT is_done +GROUP BY + database, + table, + command +``` + +## Current Processes + +```sql +select elapsed, query from system.processes where is_initial_query and elapsed > 2 +``` + +## Processes retrospectively + +```sql +SELECT + normalizedQueryHash(query) hash, + current_database, + sum(ProfileEvents['UserTimeMicroseconds'] as userCPUq)/1000 AS userCPUms, + count(), + sum(query_duration_ms) query_duration_ms, + userCPUms/query_duration_ms cpu_per_sec, + argMax(query, userCPUq) heaviest_query +FROM system.query_log +WHERE (type = 2) AND (event_date >= today()) +GROUP BY + 
current_database, + hash +ORDER BY userCPUms DESC +LIMIT 10 +FORMAT Vertical; +``` diff --git a/content/en/altinity-kb-setup-and-maintenance/zookeeper-session-expired.md b/content/en/altinity-kb-setup-and-maintenance/zookeeper-session-expired.md index 1e5167d51f..e1b0b68ca6 100644 --- a/content/en/altinity-kb-setup-and-maintenance/zookeeper-session-expired.md +++ b/content/en/altinity-kb-setup-and-maintenance/zookeeper-session-expired.md @@ -1,12 +1,12 @@ --- -title: "ZooKeeper session has expired" -linkTitle: "ZooKeeper session has expired" +title: "Zookeeper session has expired" +linkTitle: "Zookeeper session has expired" weight: 100 description: >- - ZooKeeper session has expired. + Zookeeper session has expired --- -> **Q. I get "ZooKeeper session has expired" once. What should i do? Should I worry?** +> **Q. I get "Zookeeper session has expired" once. What should i do? Should I worry?** Getting exceptions or lack of acknowledgement in distributed system from time to time is a normal situation. Your client should do the retry. If that happened once and your client do retries correctly - nothing to worry about. @@ -14,57 +14,75 @@ Your client should do the retry. If that happened once and your client do retrie It it happens often, or with every retry - it may be a sign of some misconfiguration / issue in cluster (see below). -> **Q. we see a lot of these: ZooKeeper session has expired. Switching to a new session** +> **Q. we see a lot of these: Zookeeper session has expired. Switching to a new session** -A. There is a single zookeeper session per server. But there are many threads that can use zookeeper simultaneously. -So the same event (we lose the single zookeeper session we had), will be reported by all the threads/queries which were using that zookeeper session. +A. There is a single Zookeeper session per server. But there are many threads that can use Zookeeper simultaneously. 
+So the same event (we lose the single Zookeeper session we had), will be reported by all the threads/queries which were using that Zookeeper session. -Usually after loosing the zookeeper session that exception is printed by all the thread which watch zookeeper replication queues, and all the threads which had some in-flight zookeeper operations (for example inserts, `ON CLUSTER` commands etc). +Usually after loosing the Zookeeper session that exception is printed by all the thread which watch Zookeeper replication queues, and all the threads which had some in-flight Zookeeper operations (for example inserts, `ON CLUSTER` commands etc). -If you see a lot of those simultaneously - that just means you have a lot of threads talking to zookeeper simultaneously (or may be you have many replicated tables?). +If you see a lot of those simultaneously - that just means you have a lot of threads talking to Zookeeper simultaneously (or may be you have many replicated tables?). BTW: every Replicated table comes with its own cost, so you [can't scale the number of replicated tables indefinitely](/altinity-kb-schema-design/how-much-is-too-much/#number-of-tables-system-wide-across-all-databases). -Typically after several hundreds (sometimes thousands) of replicated tables, the clickhouse server becomes unusable: it can't do any other work, but only keeping replication housekeeping tasks. 'ClickHouse-way' is to have a few (maybe dozens) of very huge tables instead of having thousands of tiny tables. (Side note: the number of not-replicated tables can be scaled much better). +Typically after several hundreds (sometimes thousands) of replicated tables, the ClickHouse® server becomes unusable: it can't do any other work, but only keeping replication housekeeping tasks. 'ClickHouse-way' is to have a few (maybe dozens) of very huge tables instead of having thousands of tiny tables. (Side note: the number of not-replicated tables can be scaled much better). 
So again if during short period of time you see lot of those exceptions and that don't happen anymore for a while - nothing to worry about. Just ensure your client is doing retries properly. > **Q. We are wondering what is causing that session to "timeout" as the default looks like 30 seconds, and there's certainly stuff happening much more frequently than every 30 seconds.** -Typically that has nothing with an expiration/timeout - even if you do nothing there are heartbeat events in the zookeeper protocol. +Typically that has nothing with an expiration/timeout - even if you do nothing there are heartbeat events in the Zookeeper protocol. -So internally inside clickhouse: -1) we have a 'zookeeper client' which in practice is a single zookeeper connection (TCP socket), with 2 threads - one serving reads, the seconds serving writes, and some API around. -2) while everything is ok zookeeper client keeps a single logical 'zookeeper session' (also by sending heartbeats etc). -3) we may have hundreds of 'users' of that zookeeper client - those are threads that do some housekeeping, serve queries etc. -4) zookeeper client normally have dozen 'in-flight' requests (asked by different threads). And if something bad happens with that -(disconnect, some issue with zookeeper server, some other failure), zookeeper client needs to re-establish the connection and switch to the new session +So internally inside ClickHouse: +1) we have a 'zookeeper client' which in practice is a single Zookeeper connection (TCP socket), with 2 threads - one serving reads, the seconds serving writes, and some API around. +2) while everything is ok Zookeeper client keeps a single logical 'zookeeper session' (also by sending heartbeats etc). +3) we may have hundreds of 'users' of that Zookeeper client - those are threads that do some housekeeping, serve queries etc. +4) Zookeeper client normally have dozen 'in-flight' requests (asked by different threads). 
And if something bad happens with that +(disconnect, some issue with Zookeeper server, some other failure), Zookeeper client needs to re-establish the connection and switch to the new session so all those 'in-flight' requests will be terminated with a 'session expired' exception. > **Q. That problem happens very often (all the time, every X minutes / hours / days).** -Sometimes the real issue can be visible somewhere close to the first 'session expired' exception in the log. (i.e. zookeeper client thread can +Sometimes the real issue can be visible somewhere close to the first 'session expired' exception in the log. (i.e. Zookeeper client thread can know & print to logs the real reason, while all 'user' threads just get 'session expired'). -Also zookeeper logs may ofter have a clue to that was the real problem. +Also Zookeeper logs may often have a clue to what was the real problem. -Known issues which can lead to session termination by zookeeper: +Known issues which can lead to session termination by Zookeeper: 1) connectivity / network issues. -2) `jute.maxbuffer` overrun. If you need to pass too much data in a single zookeeper transaction. (often happens if you need to do ALTER table UPDATE or other mutation on the table with big number of parts). The fix is adjusting JVM setting: -Djute.maxbuffer=8388608. See https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/jvm-sizes-and-garbage-collector-settings/ -3) XID overflow. XID is a transaction counter in zookeeper, if you do too many transactions the counter reaches maxint32, and to restart the counter zookeeper closes all the connections. Usually, that happens rarely, and is not avoidable in zookeeper (well in clickhouse-keeper that problem solved). There are some corner cases / some schemas which may end up with that XID overflow happening quite often. (a worst case we saw was once per 3 weeks). +2) `jute.maxbuffer` overrun. 
If you need to pass too much data in a single Zookeeper transaction. (often happens if you need to do ALTER table UPDATE or other mutation on the table with big number of parts). The fix is adjusting JVM setting: -Djute.maxbuffer=8388608. See https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/jvm-sizes-and-garbage-collector-settings/ +3) XID overflow. XID is a transaction counter in Zookeeper, if you do too many transactions the counter reaches maxint32, and to restart the counter Zookeeper closes all the connections. Usually, that happens rarely, and is not avoidable in Zookeeper (well in clickhouse-keeper that problem solved). There are some corner cases / some schemas which may end up with that XID overflow happening quite often. (a worst case we saw was once per 3 weeks). -> **Q. "ZooKeeper session has expired" happens every time I try to start the mutation / do other ALTER on Replicated table.** +> **Q. "Zookeeper session has expired" happens every time I try to start the mutation / do other ALTER on Replicated table.** -During ALTERing replicated table ClickHouse need to create a record in zookeeper listing all the parts which should be mutated (that usually means = list names of all parts of the table). If the size of list of parts exceeds maximum buffer size - zookeeper drops the connection. +During ALTERing replicated table ClickHouse need to create a record in Zookeeper listing all the parts which should be mutated (that usually means = list names of all parts of the table). If the size of list of parts exceeds maximum buffer size - Zookeeper drops the connection. Parts name length can be different for different tables. In average with default `jute.maxbuffer` (1Mb) mutations start to fail for tables which have more than 5000 parts. 
Solutions: 1) rethink partitioning, high number of parts in table is usually [not recommended](https://kb.altinity.com/altinity-kb-schema-design/how-much-is-too-much/#number-of-parts--partitions-system-wide-across-all-databases) -2) increase `jute.maxbuffer` on zookeeper side [to values about 8M](https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/jvm-sizes-and-garbage-collector-settings/) -3) use IN PARITION clause for mutations (where applicable) - since [20.12](https://github.com/ClickHouse/ClickHouse/pull/13403) +2) increase `jute.maxbuffer` on Zookeeper side [to values about 8M](https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-zookeeper/jvm-sizes-and-garbage-collector-settings/) +3) use IN PARTITION clause for mutations (where applicable) - since [20.12](https://github.com/ClickHouse/ClickHouse/pull/13403) 4) switch to clickhouse-keeper +> **Q. "Zookeeper session has expired and also Operation timeout" happens when reading blocks from Zookeeper**: + +```bash +2024.02.22 07:20:39.222171 [ 1047 ] {} ZooKeeperClient: Code: 999. Coordination::Exception: Operation timeout (no response) for request List for path: +/clickhouse/tables/github_events/block_numbers/20240205105000 (Operation timeout). (KEEPER_EXCEPTION), +2024.02.22 07:20:39.223293 [ 246 ] {} default.github_events : void DB::StorageReplicatedMergeTree::mergeSelectingTask(): +Code: 999. Coordination::Exception: /clickhouse/tables/github_events/block_numbers/20240205105000 (Connection loss). +``` + +Sometimes these `Session expired` and `operation timeout` are common, because of merges that read all the blocks in Zookeeper for a table and if there are many blocks (and partitions) read time can be longer than the 10 secs default [operation timeout](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#server-settings_zookeeper). 
+When dropping a partition, ClickHouse never drops old block numbers from Zookeeper, so the list grows indefinitely. It is done as a precaution against race between DROP PARTITION and INSERT. It is safe to clean those old blocks manually. + +This is being addressed in **[#59507 Add `FORGET PARTITION` query to remove old partition nodes from ZooKeeper](https://github.com/ClickHouse/ClickHouse/pull/59507)** + +Solutions: +Manually remove old/forgotten blocks https://kb.altinity.com/altinity-kb-useful-queries/remove_unneeded_block_numbers/ + + Related issues: - https://github.com/ClickHouse/ClickHouse/issues/16307 - https://github.com/ClickHouse/ClickHouse/issues/11933 diff --git a/content/en/altinity-kb-useful-queries/_index.md b/content/en/altinity-kb-useful-queries/_index.md index 1e8a336e1f..984639bd84 100644 --- a/content/en/altinity-kb-useful-queries/_index.md +++ b/content/en/altinity-kb-useful-queries/_index.md @@ -5,6 +5,6 @@ keywords: - clickhouse queries - clickhouse datasets description: > - Access useful ClickHouse queries, from finding database size, missing blocks, checking table metadata in Zookeeper, and more. + Access useful ClickHouse® queries, from finding database size, missing blocks, checking table metadata in Zookeeper, and more. 
weight: 6 --- diff --git a/content/en/altinity-kb-useful-queries/altinity-kb-database-size-table-column-size.md b/content/en/altinity-kb-useful-queries/altinity-kb-database-size-table-column-size.md index f09c039f72..fdfc18aae3 100644 --- a/content/en/altinity-kb-useful-queries/altinity-kb-database-size-table-column-size.md +++ b/content/en/altinity-kb-useful-queries/altinity-kb-database-size-table-column-size.md @@ -3,6 +3,9 @@ title: "Database Size - Table - Column size" linkTitle: "Database Size - Table - Column size" description: > Database Size - Table - Column size +keywords: + - clickhouse database size + - clickhouse table size --- ## Tables @@ -229,3 +232,38 @@ GROUP BY table FORMAT Vertical ``` + +## Subcolumns sizes + +```sql +WITH + if( + length(subcolumns.names) > 0, + arrayMap( (sc_n,sc_t,sc_s, sc_bod, sc_dcb, sc_dub) -> tuple(sc_n,sc_t,sc_s, sc_bod, sc_dcb, sc_dub), subcolumns.names, subcolumns.types, subcolumns.serializations, subcolumns.bytes_on_disk, subcolumns.data_compressed_bytes, subcolumns.data_uncompressed_bytes), + [tuple('',type,serialization_kind,column_bytes_on_disk,column_data_compressed_bytes,column_data_uncompressed_bytes)]) as _subcolumns_data, + arrayJoin(_subcolumns_data) as _subcolumn, + _subcolumn.1 as _sc_name, + _subcolumn.2 as _sc_type, + _subcolumn.3 as _sc_serialization, + _subcolumn.4 as _sc_bytes_on_disk, + _subcolumn.5 as _sc_data_compressed_bytes, + _subcolumn.6 as _sc_uncompressed_bytes +SELECT + database || '.' 
|| table as table_, + column as column_, + _sc_name as subcolumn_, + any(_sc_type), + formatReadableSize(sum(_sc_data_compressed_bytes) AS size) AS compressed, + formatReadableSize(sum(_sc_uncompressed_bytes) AS usize) AS uncompressed, + round(usize / size, 2) AS compr_ratio, + sum(rows) AS rows_cnt, + round(usize / rows_cnt, 2) AS avg_row_size +FROM system.parts_columns +WHERE (active = 1) AND (database LIKE '%') AND (`table` LIKE '%') +GROUP BY + table_, + column_, + subcolumn_ +ORDER BY size DESC ; +``` + diff --git a/content/en/altinity-kb-useful-queries/altinity-kb-datasets.md b/content/en/altinity-kb-useful-queries/altinity-kb-datasets.md deleted file mode 100644 index 4603267bae..0000000000 --- a/content/en/altinity-kb-useful-queries/altinity-kb-datasets.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "Datasets" -linkTitle: "Datasets" -description: > - Datasets ---- diff --git a/content/en/altinity-kb-useful-queries/altinity-kb-number-of-active-parts-in-a-partition.md b/content/en/altinity-kb-useful-queries/altinity-kb-number-of-active-parts-in-a-partition.md index bfb793dbba..44e6f043eb 100644 --- a/content/en/altinity-kb-useful-queries/altinity-kb-number-of-active-parts-in-a-partition.md +++ b/content/en/altinity-kb-useful-queries/altinity-kb-number-of-active-parts-in-a-partition.md @@ -4,7 +4,7 @@ linkTitle: "Number of active parts in a partition" description: > Number of active parts in a partition --- -## Q: Why do I have several active parts in a partition? Why Clickhouse does not merge them immediately? +## Q: Why do I have several active parts in a partition? Why ClickHouse® does not merge them immediately? ### A: CH does not merge parts by time @@ -14,7 +14,7 @@ CH merge scheduler balances between a big number of parts and a wasting resource Merges are CPU/DISK IO expensive. If CH will merge every new part then all resources will be spend on merges and will no resources remain on queries (selects ). 
-CH will not merge parts with a combined size greater than 100 GB. +CH will not merge parts with a combined size greater than 150 GB [max_bytes_to_merge_at_max_space_in_pool](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#max-bytes-to-merge-at-max-space-in-pool). ``` SELECT diff --git a/content/en/altinity-kb-useful-queries/compare_query_log_for_2_intervals.md b/content/en/altinity-kb-useful-queries/compare_query_log_for_2_intervals.md new file mode 100644 index 0000000000..fe537c77a9 --- /dev/null +++ b/content/en/altinity-kb-useful-queries/compare_query_log_for_2_intervals.md @@ -0,0 +1,95 @@ +--- +title: "Compare query_log for 2 intervals" +linkTitle: "Compare query_log for 2 intervals" +weight: 100 +description: >- +--- + +``` +WITH + toStartOfInterval(event_time, INTERVAL 5 MINUTE) = '2023-06-30 13:00:00' as before, + toStartOfInterval(event_time, INTERVAL 5 MINUTE) = '2023-06-30 15:00:00' as after +SELECT + normalized_query_hash, + anyIf(query, before) AS QueryBefore, + anyIf(query, after) AS QueryAfter, + countIf(before) as CountBefore, + sumIf(query_duration_ms, before) / 1000 AS QueriesDurationBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'RealTimeMicroseconds')], before) / 1000000 AS RealTimeBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'UserTimeMicroseconds')], before) / 1000000 AS UserTimeBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'SystemTimeMicroseconds')], before) / 1000000 AS SystemTimeBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'DiskReadElapsedMicroseconds')], before) / 1000000 AS DiskReadTimeBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'DiskWriteElapsedMicroseconds')], before) / 1000000 AS DiskWriteTimeBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'NetworkSendElapsedMicroseconds')], before) / 1000000 AS NetworkSendTimeBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 
'NetworkReceiveElapsedMicroseconds')], before) / 1000000 AS NetworkReceiveTimeBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'ZooKeeperWaitMicroseconds')], before) / 1000000 AS ZooKeeperWaitTimeBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'OSIOWaitMicroseconds')], before) / 1000000 AS OSIOWaitTimeBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'OSCPUWaitMicroseconds')], before) / 1000000 AS OSCPUWaitTimeBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'OSCPUVirtualTimeMicroseconds')], before) / 1000000 AS OSCPUVirtualTimeBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'SelectedBytes')], before) AS SelectedBytesBefore, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'SelectedRanges')], before) AS SelectedRangesBefore, + sumIf(read_rows, before) AS ReadRowsBefore, + formatReadableSize(sumIf(read_bytes, before) AS ReadBytesBefore), + sumIf(written_rows, before) AS WrittenTowsBefore, + formatReadableSize(sumIf(written_bytes, before)) AS WrittenBytesBefore, + sumIf(result_rows, before) AS ResultRowsBefore, + formatReadableSize(sumIf(result_bytes, before)) AS ResultBytesBefore, + + countIf(after) as CountAfter, + sumIf(query_duration_ms, after) / 1000 AS QueriesDurationAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'RealTimeMicroseconds')], after) / 1000000 AS RealTimeAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'UserTimeMicroseconds')], after) / 1000000 AS UserTimeAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'SystemTimeMicroseconds')], after) / 1000000 AS SystemTimeAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'DiskReadElapsedMicroseconds')], after) / 1000000 AS DiskReadTimeAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'DiskWriteElapsedMicroseconds')], after) / 1000000 AS DiskWriteTimeAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 
'NetworkSendElapsedMicroseconds')], after) / 1000000 AS NetworkSendTimeAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'NetworkReceiveElapsedMicroseconds')], after) / 1000000 AS NetworkReceiveTimeAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'ZooKeeperWaitMicroseconds')], after) / 1000000 AS ZooKeeperWaitTimeAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'OSIOWaitMicroseconds')], after) / 1000000 AS OSIOWaitTimeAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'OSCPUWaitMicroseconds')], after) / 1000000 AS OSCPUWaitTimeAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'OSCPUVirtualTimeMicroseconds')], after) / 1000000 AS OSCPUVirtualTimeAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'SelectedBytes')], after) AS SelectedBytesAfter, + sumIf(ProfileEvents.Values[indexOf(ProfileEvents.Names, 'SelectedRanges')], after) AS SelectedRangesAfter, + + sumIf(read_rows, after) AS ReadRowsAfter, + formatReadableSize(sumIf(read_bytes, after) AS ReadBytesAfter), + sumIf(written_rows, after) AS WrittenTowsAfter, + formatReadableSize(sumIf(written_bytes, after)) AS WrittenBytesAfter, + sumIf(result_rows, after) AS ResultRowsAfter, + formatReadableSize(sumIf(result_bytes, after)) AS ResultBytesAfter + +FROM system.query_log +WHERE (before OR after) AND type in (2,4) -- QueryFinish, ExceptionWhileProcessing +GROUP BY normalized_query_hash + WITH TOTALS +ORDER BY SelectedRangesAfter- SelectedRangesBefore DESC +LIMIT 10 +FORMAT Vertical +``` + + +``` +WITH + toDateTime('2024-02-09 00:00:00') as timestamp_of_issue, + event_time < timestamp_of_issue as before, + event_time >= timestamp_of_issue as after +select + normalized_query_hash as h, + any(query) as query_sample, + round(quantileIf(0.9)(query_duration_ms, before)) as duration_q90_before, + round(quantileIf(0.9)(query_duration_ms, after)) as duration_q90_after, + countIf(before) as cnt_before, + countIf(after) as cnt_after, + 
sumIf(query_duration_ms,before) as duration_sum_before, + sumIf(query_duration_ms,after) as duration_sum_after, + sumIf(ProfileEvents['UserTimeMicroseconds'], before) as usertime_sum_before, + sumIf(ProfileEvents['UserTimeMicroseconds'], after) as usertime_sum_after, + sumIf(read_bytes,before) as sum_read_bytes_before, + sumIf(read_bytes,after) as sum_read_bytes_after +from system.query_log +where event_time between timestamp_of_issue - INTERVAL 3 DAY and timestamp_of_issue + INTERVAL 3 DAY +group by h +HAVING cnt_after > 1.1 * cnt_before OR sum_read_bytes_after > 1.2 * sum_read_bytes_before OR usertime_sum_after > 1.2 * usertime_sum_before +ORDER BY sum_read_bytes_after - sum_read_bytes_before +FORMAT Vertical +``` diff --git a/content/en/altinity-kb-useful-queries/connection-issues-distributed-parts.md b/content/en/altinity-kb-useful-queries/connection-issues-distributed-parts.md new file mode 100644 index 0000000000..60a28e83e1 --- /dev/null +++ b/content/en/altinity-kb-useful-queries/connection-issues-distributed-parts.md @@ -0,0 +1,37 @@ +--- +title: "Notes on Various Errors with respect to replication and distributed connections" +linkTitle: "Notes on Various Errors with respect to replication and distributed connections" +description: > + Notes on errors related to replication and distributed connections +keywords: + - replication + - distributed connections +--- + +## `ClickHouseDistributedConnectionExceptions` + +This alert usually indicates that one of the nodes isn’t responding or that there’s an interconnectivity issue. Debug steps: + +## 1. Check Cluster Connectivity +Verify connectivity inside the cluster by running: +``` +SELECT count() FROM clusterAllReplicas('{cluster}', cluster('{cluster}', system.one)) +``` + +## 2. 
Check for Errors +Run the following queries to see if any nodes report errors: + +``` +SELECT hostName(), * FROM clusterAllReplicas('{cluster}', system.clusters) WHERE errors_count > 0; +SELECT hostName(), * FROM clusterAllReplicas('{cluster}', system.errors) WHERE last_error_time > now() - 3600 ORDER BY value; +``` + + Depending on the results, ensure that the affected node is up and responding to queries. Also, verify that connectivity (DNS, routes, delays) is functioning correctly. + +### `ClickHouseReplicatedPartChecksFailed` & `ClickHouseReplicatedPartFailedFetches` + +Unless you’re seeing huge numbers, these alerts can generally be ignored. They’re often a sign of temporary replication issues that ClickHouse resolves on its own. However, if the issue persists or increases rapidly, follow the steps to debug replication issues: + +* Check the replication status using tables such as system.replicas and system.replication_queue. +* Examine server logs, system.errors, and system load for any clues. +* Try to restart the replica (`SYSTEM RESTART REPLICA db_name.table_name` command) and, if necessary, contact Altinity support. 
diff --git a/content/en/altinity-kb-useful-queries/debug-hang.md b/content/en/altinity-kb-useful-queries/debug-hang.md index 5f7d9b0c8f..be6b6e3872 100644 --- a/content/en/altinity-kb-useful-queries/debug-hang.md +++ b/content/en/altinity-kb-useful-queries/debug-hang.md @@ -1,18 +1,18 @@ --- -title: "Debug hunging thing" -linkTitle: "Debug hunging thing" +title: "Debug hanging thing" +linkTitle: "Debug hanging thing" weight: 100 description: >- - Debug hunging / freezing things + Debug hanging / freezing things --- -## Debug hunging / freezing things +## Debug hanging / freezing things -If ClickHouse is busy with something and you don't know what's happeing, you can easily check the stacktraces of all the thread which are working +If ClickHouse® is busy with something and you don't know what's happening, you can easily check the stacktraces of all the threads which are working ```sql SELECT - arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS trace_functions, + arrayStringConcat(arrayMap(x -> concat('0x', lower(hex(x)), '\t', demangle(addressToSymbol(x))), trace), '\n') as trace_functions, count() FROM system.stack_trace GROUP BY trace_functions @@ -22,7 +22,7 @@ SETTINGS allow_introspection_functions=1 FORMAT Vertical; ``` -If you can't start any queries, but you have access to the node, you can sent a singal +If you can't start any queries, but you have access to the node, you can send a signal ``` # older versions diff --git a/content/en/altinity-kb-useful-queries/detached-parts.md b/content/en/altinity-kb-useful-queries/detached-parts.md index 4b83fa2ab3..40fb88ef92 100644 --- a/content/en/altinity-kb-useful-queries/detached-parts.md +++ b/content/en/altinity-kb-useful-queries/detached-parts.md @@ -1,30 +1,101 @@ --- -title: "Can detached parts be dropped?" +title: "Can detached parts in ClickHouse® be dropped?" linkTitle: "Can detached parts be dropped?" description: > - Can detached parts be dropped? 
+ Cleaning up detached parts without data loss +keywords: + - clickhouse detached parts + - clickhouse detach + - clickhouse drop partition --- -Here is what different statuses mean: -1. Parts are renamed to 'ignored' if they were found during ATTACH together with other, bigger parts that cover the same blocks of data, i.e. they were already merged into something else. -2. parts are renamed to 'broken' if ClickHouse was not able to load data from the parts. There could be different reasons: some files are lost, checksums are not correct, etc. -3. parts are renamed to 'unexpected' if they are present locally, but are not found in ZooKeeper, in case when an insert was not completed properly. The part is detached only if it's old enough (5 minutes), otherwise CH registers this part in ZooKeeper as a new part. -4. parts are renamed to 'cloned' if ClickHouse have had some parts on local disk while repairing lost replica so already existed parts being renamed and put in detached directory. Controlled by setting `detach_old_local_parts_when_cloning_replica`. +Detached parts act like the “Recycle Bin” in Windows. When ClickHouse deems some data unneeded—often during internal reconciliations at server startup—it moves the data to the detached area instead of deleting it immediately. + +Recovery: If you’re missing data due to misconfiguration or an error (such as connecting to the wrong ZooKeeper), check the detached parts. The missing data might be recoverable through manual intervention. + +Cleanup: Otherwise, clean up the detached parts periodically to free disk space. + +Regarding detached parts and the absence of an automatic cleanup feature within ClickHouse: this was a deliberate decision, as there is a possibility that data may appear there due to a bug in ClickHouse's code, a hardware error (such as a memory error or disk failure), etc. In such cases, automatic cleanup is not desirable. 
+ +Blog article - https://altinity.com/blog/understanding-detached-parts-in-clickhouse + + +ClickHouse® users should monitor for detached parts and act quickly when they appear. Here is what the different statuses of detached parts mean: + +1. Parts are renamed to **ignored** if they were found during ATTACH together with other, bigger parts that cover the same blocks of data, i.e. they were already merged into something else. +2. Parts are renamed to **broken** if ClickHouse was not able to load data from the parts. There could be different reasons: some files are lost, checksums are not correct, etc. +3. Parts are renamed to **unexpected** if they are present locally, but are not found in ZooKeeper, in case when an insert was not completed properly. The part is detached only if it's old enough (5 minutes), otherwise CH registers this part in ZooKeeper as a new part. +4. Parts are renamed to **cloned** if ClickHouse has had some parts on local disk while repairing lost replica so already existed parts being renamed and put in detached directory. Controlled by setting `detach_old_local_parts_when_cloning_replica`. 'Ignored' parts are safe to delete. 'Unexpected' and 'broken' should be investigated, but it might not be an easy thing to do, especially for older parts. If the `system.part_log table` is enabled you can find some information there. Otherwise you will need to look in `clickhouse-server.log` for what happened when the parts were detached. If there is another way you could confirm that there is no data loss in the affected tables, you could simply delete all detached parts. -Here is a query that can help with investigations. It looks for active parts containing the same data blocks that the detached parts: +Again, it is important to monitor for detached parts and act quickly when they appear. If `clickhouse-server.log` is lost it might be impossible to figure out what happened and why the parts were detached. 
+You can use `system.asynchronous_metrics` or `system.detached_parts` for monitoring. +```sql +select metric from system.asynchronous_metrics where metric ilike '%detach%' + +NumberOfDetachedByUserParts +NumberOfDetachedParts +``` +Here is a quick way to find out if you have detached parts along with the reason why. ```sql -SELECT *, - concat('alter table ',database,'.',table,' drop detached part ''',a.name,''' settings allow_drop_detached=1;') as drop -FROM system.detached_parts a -ALL LEFT JOIN -(SELECT database, table, partition_id, name, active, min_block_number, max_block_number - FROM system.parts WHERE active -) b -USING (database, table, partition_id) -WHERE a.min_block_number >= b.min_block_number - AND a.max_block_number <= b.max_block_number +SELECT database, table, reason, count() +FROM system.detached_parts +GROUP BY database, table, reason +ORDER BY database ASC, table ASC, reason ASC +``` + +### drop detached +The DROP DETACHED command in ClickHouse is used to remove parts or partitions that have previously been detached (i.e., moved to the detached directory and forgotten by the server). The syntax is: + +``` +ALTER TABLE table_name [ON CLUSTER cluster] DROP DETACHED PARTITION|PART ALL|partition_expr ``` + +This command removes the specified part or all parts of the specified partition from the detached directory. For more details on how to specify the partition expression, see the documentation on how to set the partition expression DROP DETACHED PARTITION|PART. + +Note: You must have the allow_drop_detached setting enabled to use this command allow_drop_detached + +### drop all script + +Here is a query that can help with investigations. It looks for active parts containing the same data blocks as the detached parts. It +generates commands to drop the detached parts. 
+ +```sql +with ['broken','unexpected','noquorum','ignored','broken-on-start','clone','attaching','deleting','tmp-fetch', + 'covered-by-broken','merge-not-byte-identical','mutate-not-byte-identical','broken-from-backup'] as DETACH_REASONS +select a.*, + concat('alter table ',database,'.',table,' drop detached part ''',a.name,''' settings allow_drop_detached=1;') as drop, + concat('sudo rm -r ',a.path) as rm +from (select * replace(part[1] as partition_id, toInt64(part[2]) as min_block_number, toInt64(part[3]) as max_block_number), + arrayFilter(x -> x not in DETACH_REASONS, splitByChar('_',name)) as part +from system.detached_parts) a +left join (select database, table, partition_id, name, active, min_block_number, max_block_number from system.parts where active) b +on a.database=b.database and a.table=b.table and a.partition_id=b.partition_id +where a.min_block_number >= b.min_block_number + and a.max_block_number <= b.max_block_number +order by table, min_block_number, max_block_number +settings join_use_nulls=1 +``` + +### Other reasons + +``` +# rg forgetPartAndMoveToDetached --type cpp +# rg renameToDetached --type cpp +# rg makeCloneInDetached --type cpp +broken +unexpected +ignored +noquorum +merge-not-byte-identical +mutate-not-byte-identical - +broken-on-start +clone +covered-by-broken - that means that ClickHouse during initialization of replicated table detected that some part is not ok, and decided to refetch it from healthy replicas. So the part itself will be detached as 'broken' and if that part was a result of merge / mutation all the previuos generations of that will be marked as covered-by-broken. If clickhouse was able to download the final part you don't need those covered-by-broken. 
+``` + +The list of DETACH_REASONS: https://github.com/ClickHouse/ClickHouse/blob/master/src/Storages/MergeTree/MergeTreePartInfo.h#L163 + diff --git a/content/en/altinity-kb-useful-queries/ingestion-rate-part_log.md b/content/en/altinity-kb-useful-queries/ingestion-rate-part_log.md index 01b492559c..92d1a7ae6c 100644 --- a/content/en/altinity-kb-useful-queries/ingestion-rate-part_log.md +++ b/content/en/altinity-kb-useful-queries/ingestion-rate-part_log.md @@ -6,8 +6,8 @@ description: >- Query to gather information about ingestion rate from system.part_log. --- +## Insert rate ```sql --- Insert rate select database, table, time_bucket, max(number_of_parts_per_insert) max_parts_pi, median(number_of_parts_per_insert) median_parts_pi, @@ -52,11 +52,28 @@ GROUP BY query_id, database, table, time_bucket ) GROUP BY database, table, time_bucket ORDER BY time_bucket, database, table ASC +``` --- New parts per partition +## New parts per partition +```sql select database, table, event_type, partition_id, count() c, round(avg(rows)) from system.part_log where event_date >= today() and event_type = 'NewPart' group by database, table, event_type, partition_id order by c desc ``` +## Too fast inserts + +It should not be more often than 1 new part per table per second (60 inserts per minute) +One insert can create several parts because of partitioning and materialized views attached. 
+ +```sql +select toStartOfMinute(event_time) t, database, table, count() c, round(avg(rows)) +from system.part_log +where event_date >= today() + and event_type = 'NewPart' + --and event_time > now() - 3600 +group by database, table, t +order by t +``` + diff --git a/content/en/altinity-kb-useful-queries/parts-consistency.md b/content/en/altinity-kb-useful-queries/parts-consistency.md index edcb739ae1..200b07796f 100644 --- a/content/en/altinity-kb-useful-queries/parts-consistency.md +++ b/content/en/altinity-kb-useful-queries/parts-consistency.md @@ -73,7 +73,7 @@ left join select concat(replica_path,'/parts/',name) as p_path from system.parts inner join system.replicas using (database, table) ) disk on zoo.p_path = disk.p_path -where part_disk='' +where part_disk='' and zoo.mtime <= now() - interval 1 hour order by part_zoo; ``` diff --git a/content/en/altinity-kb-useful-queries/query_log.md b/content/en/altinity-kb-useful-queries/query_log.md index 83b606ab8f..7fd7cf6050 100644 --- a/content/en/altinity-kb-useful-queries/query_log.md +++ b/content/en/altinity-kb-useful-queries/query_log.md @@ -1,6 +1,6 @@ --- -title: "Handy queries for a system.query_log" -linkTitle: "Handy queries for a system.query_log" +title: "Handy queries for system.query_log" +linkTitle: "Handy queries for system.query_log" weight: 100 description: >- Handy queries for a system.query_log. 
@@ -32,7 +32,7 @@ SELECT sum(result_rows) AS ResultRows, formatReadableSize(sum(result_bytes)) AS ResultBytes FROM system.query_log -WHERE (event_time > (now() - 3600)) AND type in (2,4) -- QueryFinish, ExceptionWhileProcessing +WHERE (event_date >= today()) AND (event_time > (now() - 3600)) AND type in (2,4) -- QueryFinish, ExceptionWhileProcessing GROUP BY normalized_query_hash WITH TOTALS ORDER BY UserTime DESC @@ -40,7 +40,124 @@ LIMIT 30 FORMAT Vertical ``` -## Find queries which were started but not finished at some moment in time +-- modern ClickHouse® + +```sql +SELECT + hostName() as host, + normalized_query_hash, + min(event_time), + max(event_time), + replace(substr(argMax(query, utime), 1, 80), '\n', ' ') AS query, + argMax(query_id, utime) AS sample_query_id, + count(), + sum(query_duration_ms) / 1000 AS QueriesDuration, /* wall clock */ + sum(ProfileEvents['RealTimeMicroseconds']) / 1000000 AS RealTime, /* same as above but x number of thread */ + sum(ProfileEvents['UserTimeMicroseconds'] as utime) / 1000000 AS UserTime, /* time when our query was doin some cpu-insense work, creating cpu load */ + sum(ProfileEvents['SystemTimeMicroseconds']) / 1000000 AS SystemTime, /* time spend on waiting for some system operations */ + sum(ProfileEvents['DiskReadElapsedMicroseconds']) / 1000000 AS DiskReadTime, + sum(ProfileEvents['DiskWriteElapsedMicroseconds']) / 1000000 AS DiskWriteTime, + sum(ProfileEvents['NetworkSendElapsedMicroseconds']) / 1000000 AS NetworkSendTime, /* check the other side of the network! */ + sum(ProfileEvents['NetworkReceiveElapsedMicroseconds']) / 1000000 AS NetworkReceiveTime, /* check the other side of the network! 
*/ + sum(ProfileEvents['ZooKeeperWaitMicroseconds']) / 1000000 AS ZooKeeperWaitTime, + sum(ProfileEvents['OSIOWaitMicroseconds']) / 1000000 AS OSIOWaitTime, /* IO waits, usually disks - that metric is 'orthogonal' to other */ + sum(ProfileEvents['OSCPUWaitMicroseconds']) / 1000000 AS OSCPUWaitTime, /* waiting for a 'free' CPU - usually high when the other load on the server creates a lot of contention for cpu */ + sum(ProfileEvents['OSCPUVirtualTimeMicroseconds']) / 1000000 AS OSCPUVirtualTime, /* similar to usertime + system time */ + formatReadableSize(sum(ProfileEvents['NetworkReceiveBytes']) as network_receive_bytes) AS NetworkReceiveBytes, + formatReadableSize(sum(ProfileEvents['NetworkSendBytes']) as network_send_bytes) AS NetworkSendBytes, + sum(ProfileEvents['SelectedParts']) as SelectedParts, + sum(ProfileEvents['SelectedRanges']) as SelectedRanges, + sum(ProfileEvents['SelectedMarks']) as SelectedMarks, + sum(ProfileEvents['SelectedRows']) as SelectedRows, /* those may different from read_rows - here the number or rows potentially matching the where conditions, not neccessary all will be read */ + sum(ProfileEvents['SelectedBytes']) as SelectedBytes, + sum(ProfileEvents['FileOpen']) as FileOpen, + sum(ProfileEvents['ZooKeeperTransactions']) as ZooKeeperTransactions, + formatReadableSize(sum(ProfileEvents['OSReadBytes'] ) as os_read_bytes ) as OSReadBytesExcludePageCache, + formatReadableSize(sum(ProfileEvents['OSWriteBytes'] ) as os_write_bytes ) as OSWriteBytesExcludePageCache, + formatReadableSize(sum(ProfileEvents['OSReadChars'] ) as os_read_chars ) as OSReadBytesIncludePageCache, + formatReadableSize(sum(ProfileEvents['OSWriteChars'] ) as os_write_chars ) as OSWriteCharsIncludePageCache, + formatReadableSize(quantile(0.97)(memory_usage) as memory_usage_q97) as MemoryUsageQ97 , + sum(read_rows) AS ReadRows, + formatReadableSize(sum(read_bytes) as read_bytes_sum) AS ReadBytes, + sum(written_rows) AS WrittenRows, + formatReadableSize(sum(written_bytes) 
as written_bytes_sum) AS WrittenBytes, /* */ + sum(result_rows) AS ResultRows, + formatReadableSize(sum(result_bytes) as result_bytes_sum) AS ResultBytes +FROM clusterAllReplicas('{cluster}', system.query_log) +WHERE event_date >= today() AND type in (2,4)-- QueryFinish, ExceptionWhileProcessing +GROUP BY + GROUPING SETS ( + (normalized_query_hash, host), + (host), + ()) +ORDER BY OSCPUVirtualTime DESC +LIMIT 30 +FORMAT Vertical; +``` + +## A/B tests of the same query +``` +WITH + query_id='8c050082-428e-4523-847a-caf29511d6ba' AS first, + query_id='618e0c55-e21d-4630-97e7-5f82e2475c32' AS second, + arrayConcat(mapKeys(ProfileEvents), ['query_duration_ms', 'read_rows', 'read_bytes', 'written_rows', 'written_bytes', 'result_rows', 'result_bytes', 'memory_usage', 'normalized_query_hash', 'peak_threads_usage', 'query_cache_usage']) AS metrics, + arrayConcat(mapValues(ProfileEvents), [query_duration_ms, read_rows, read_bytes, written_rows, written_bytes, result_rows, result_bytes, memory_usage, normalized_query_hash, peak_threads_usage, toUInt64(query_cache_usage)]) AS metrics_values +SELECT + metrics[i] AS metric, + anyIf(metrics_values[i], first) AS v1, + anyIf(metrics_values[i], second) AS v2, + formatReadableQuantity(v1 - v2) +FROM clusterAllReplicas(default, system.query_log) +ARRAY JOIN arrayEnumerate(metrics) AS i +WHERE (first OR second) AND (type = 2) +GROUP BY metric +HAVING v1 != v2 +ORDER BY + (v2 - v1) / (v1 + v2) DESC, + v2 DESC, + metric ASC +``` + +Another variant +``` +WITH + toUUID('d18fb820-4075-49bf-8fa3-cd7e53b9d523') AS fast_query_id, + toUUID('22ffbcc0-c62a-4895-8105-ee9d7447a643') AS slow_query_id, + faster AS + ( + SELECT pe.1 AS event_name, pe.2 AS event_value + FROM + ( + SELECT ProfileEvents.Names, ProfileEvents.Values + FROM system.query_log + WHERE (query_id = fast_query_id ) AND (type = 'QueryFinish') AND (event_date = today()) + ) + ARRAY JOIN arrayZip(ProfileEvents.Names, ProfileEvents.Values) AS pe + ), + slower AS + ( + SELECT pe.1 AS 
event_name, pe.2 AS event_value + FROM + ( + SELECT ProfileEvents.Names, ProfileEvents.Values + FROM system.query_log + WHERE (query_id = slow_query_id) AND (type = 'QueryFinish') AND (event_date = today()) + ) + ARRAY JOIN arrayZip(ProfileEvents.Names, ProfileEvents.Values) AS pe + ) +SELECT + event_name, + formatReadableQuantity(slower.event_value) AS slower_value, + formatReadableQuantity(faster.event_value) AS faster_value, + round((slower.event_value - faster.event_value) / slower.event_value, 2) AS diff_q +FROM faster +LEFT JOIN slower USING (event_name) +WHERE diff_q > 0.05 +ORDER BY event_name ASC +SETTINGS join_use_nulls = 1 +``` + +## Find queries that were started but not finished at some moment in time ```sql SELECT @@ -60,3 +177,188 @@ from system.query_log where event_time between '2021-09-24 07:00:00' and '2021-09-24 09:00:00' group by query_id HAVING countIf(type=1) <> countIf(type!=1) ``` + +## Columns used in WHERE clauses +``` +WITH + any(query) AS q, + any(tables) AS _tables, + arrayJoin(extractAll(query, '\\b(?:PRE)?WHERE\\s+(.*?)\\s+(?:GROUP BY|ORDER BY|UNION|SETTINGS|FORMAT$)')) AS w, + any(columns) AS cols, + arrayFilter(x -> (position(w, extract(x, '\\.(`[^`]+`|[^\\.]+)$')) > 0), columns) AS c, + arrayJoin(c) AS c2 +SELECT + c2, + count() +FROM system.query_log +WHERE (event_time >= (now() - toIntervalDay(1))) + AND arrayExists(x -> (x LIKE '%target_table%'), tables) + AND (query ILIKE 'SELECT%') +GROUP BY c2 +ORDER BY count() ASC; +``` +Replace %target_table% with the actual table name (or pattern) you want to inspect. 
+ +## Most‑selected columns + +``` +SELECT + col AS column, + count() AS hits +FROM system.query_log +ARRAY JOIN columns AS col -- expand the column list first +WHERE type = 'QueryFinish' + AND query_kind = 'Select' + AND event_time >= now() - INTERVAL 7 DAY + AND notEmpty(columns) +GROUP BY col +ORDER BY hits DESC +LIMIT 50; +``` + +## Most‑used functions + +``` +SELECT + f AS function, + count() AS hits +FROM system.query_log +ARRAY JOIN used_functions AS f -- used_aggregate_functions, used_aggregate_function_combinators +WHERE type = 'QueryFinish' + AND event_time >= now() - INTERVAL 7 DAY + AND notEmpty(used_functions) +GROUP BY f +ORDER BY hits DESC +LIMIT 50; +``` + +## query ranks +``` + +SELECT * +FROM +( +SELECT + *, + DENSE_RANK() OVER (PARTITION BY host ORDER BY cnt DESC) as rank_by_cnt, + DENSE_RANK() OVER (PARTITION BY host ORDER BY QueriesDuration DESC) as rank_by_duration, + DENSE_RANK() OVER (PARTITION BY host ORDER BY RealTime DESC) as rank_by_real_time, + DENSE_RANK() OVER (PARTITION BY host ORDER BY UserTime DESC) as rank_by_user_time, + DENSE_RANK() OVER (PARTITION BY host ORDER BY SystemTime DESC) as rank_by_system_time, + DENSE_RANK() OVER (PARTITION BY host ORDER BY DiskReadTime DESC) as rank_by_disk_read_time, + DENSE_RANK() OVER (PARTITION BY host ORDER BY DiskWriteTime DESC) as rank_by_disk_write_time, + DENSE_RANK() OVER (PARTITION BY host ORDER BY NetworkSendTime DESC) as rank_by_network_send_time, + DENSE_RANK() OVER (PARTITION BY host ORDER BY NetworkReceiveTime DESC) as rank_by_network_receive_time, + DENSE_RANK() OVER (PARTITION BY host ORDER BY OSIOWaitTime DESC) as rank_by_os_io_wait_time, + DENSE_RANK() OVER (PARTITION BY host ORDER BY OSCPUWaitTime DESC) as rank_by_os_cpu_wait_time, + DENSE_RANK() OVER (PARTITION BY host ORDER BY OSCPUVirtualTime DESC) as rank_by_os_cpu_virtual_time, + DENSE_RANK() OVER (PARTITION BY host ORDER BY NetworkReceiveBytes DESC) as rank_by_network_receive_bytes, + DENSE_RANK() OVER (PARTITION BY host 
ORDER BY NetworkSendBytes DESC) as rank_by_network_send_bytes, + DENSE_RANK() OVER (PARTITION BY host ORDER BY SelectedParts DESC) as rank_by_selected_parts, + DENSE_RANK() OVER (PARTITION BY host ORDER BY SelectedRanges DESC) as rank_by_selected_ranges, + DENSE_RANK() OVER (PARTITION BY host ORDER BY SelectedMarks DESC) as rank_by_selected_marks, + DENSE_RANK() OVER (PARTITION BY host ORDER BY SelectedRows DESC) as rank_by_selected_rows, + DENSE_RANK() OVER (PARTITION BY host ORDER BY SelectedBytes DESC) as rank_by_selected_bytes, + DENSE_RANK() OVER (PARTITION BY host ORDER BY FileOpen DESC) as rank_by_file_open, + DENSE_RANK() OVER (PARTITION BY host ORDER BY ZooKeeperTransactions DESC) as rank_by_zookeeper_transactions, + DENSE_RANK() OVER (PARTITION BY host ORDER BY OSReadBytesExcludePageCache DESC) as rank_by_os_read_bytes_exclude_page_cache, + DENSE_RANK() OVER (PARTITION BY host ORDER BY OSWriteBytesExcludePageCache DESC) as rank_by_os_write_bytes_exclude_page_cache, + DENSE_RANK() OVER (PARTITION BY host ORDER BY OSReadBytesIncludePageCache DESC) as rank_by_os_read_bytes_include_page_cache, + DENSE_RANK() OVER (PARTITION BY host ORDER BY OSWriteCharsIncludePageCache DESC) as rank_by_os_write_chars_include_page_cache, + DENSE_RANK() OVER (PARTITION BY host ORDER BY MemoryUsageQ97 DESC) as rank_by_memory_usage_q97, + DENSE_RANK() OVER (PARTITION BY host ORDER BY ReadRows DESC) as rank_by_read_rows, + DENSE_RANK() OVER (PARTITION BY host ORDER BY ReadBytes DESC) as rank_by_read_bytes, + DENSE_RANK() OVER (PARTITION BY host ORDER BY WrittenRows DESC) as rank_by_written_rows, + DENSE_RANK() OVER (PARTITION BY host ORDER BY WrittenBytes DESC) as rank_by_written_bytes, + DENSE_RANK() OVER (PARTITION BY host ORDER BY ResultRows DESC) as rank_by_result_rows, + DENSE_RANK() OVER (PARTITION BY host ORDER BY ResultBytes DESC) as rank_by_result_bytes +FROM +( +SELECT + hostName() as host, + normalized_query_hash, + min(event_time) as min_event_time, + max(event_time) 
as max_event_time, + replace(substr(argMax(query, utime), 1, 80), '\n', ' ') AS query, + argMax(query_id, utime) AS sample_query_id, + count() as cnt, + sum(query_duration_ms) / 1000 AS QueriesDuration, /* wall clock */ + sum(ProfileEvents['RealTimeMicroseconds']) / 1000000 AS RealTime, /* same as above but x number of thread */ + sum(ProfileEvents['UserTimeMicroseconds'] as utime) / 1000000 AS UserTime, /* time when our query was doin some cpu-insense work, creating cpu load */ + sum(ProfileEvents['SystemTimeMicroseconds']) / 1000000 AS SystemTime, /* time spend on waiting for some system operations */ + sum(ProfileEvents['DiskReadElapsedMicroseconds']) / 1000000 AS DiskReadTime, + sum(ProfileEvents['DiskWriteElapsedMicroseconds']) / 1000000 AS DiskWriteTime, + sum(ProfileEvents['NetworkSendElapsedMicroseconds']) / 1000000 AS NetworkSendTime, /* check the other side of the network! */ + sum(ProfileEvents['NetworkReceiveElapsedMicroseconds']) / 1000000 AS NetworkReceiveTime, /* check the other side of the network! 
*/ + sum(ProfileEvents['OSIOWaitMicroseconds']) / 1000000 AS OSIOWaitTime, /* IO waits, usually disks - that metric is 'orthogonal' to other */ + sum(ProfileEvents['OSCPUWaitMicroseconds']) / 1000000 AS OSCPUWaitTime, /* waiting for a 'free' CPU - usually high when the other load on the server creates a lot of contention for cpu */ + sum(ProfileEvents['OSCPUVirtualTimeMicroseconds']) / 1000000 AS OSCPUVirtualTime, /* similar to usertime + system time */ + sum(ProfileEvents['NetworkReceiveBytes']) AS NetworkReceiveBytes, + sum(ProfileEvents['NetworkSendBytes']) AS NetworkSendBytes, + sum(ProfileEvents['SelectedParts']) as SelectedParts, + sum(ProfileEvents['SelectedRanges']) as SelectedRanges, + sum(ProfileEvents['SelectedMarks']) as SelectedMarks, + sum(ProfileEvents['SelectedRows']) as SelectedRows, /* those may different from read_rows - here the number or rows potentially matching the where conditions, not neccessary all will be read */ + sum(ProfileEvents['SelectedBytes']) as SelectedBytes, + sum(ProfileEvents['FileOpen']) as FileOpen, + sum(ProfileEvents['ZooKeeperTransactions']) as ZooKeeperTransactions, + sum(ProfileEvents['OSReadBytes'] ) as OSReadBytesExcludePageCache, + sum(ProfileEvents['OSWriteBytes'] ) as OSWriteBytesExcludePageCache, + sum(ProfileEvents['OSReadChars'] ) as OSReadBytesIncludePageCache, + sum(ProfileEvents['OSWriteChars'] ) as OSWriteCharsIncludePageCache, + quantile(0.97)(memory_usage) as MemoryUsageQ97 , + sum(read_rows) AS ReadRows, + sum(read_bytes) AS ReadBytes, + sum(written_rows) AS WrittenRows, + sum(written_bytes) AS WrittenBytes, /* */ + sum(result_rows) AS ResultRows, + sum(result_bytes) AS ResultBytes +FROM clusterAllReplicas('{cluster}', system.query_log) +WHERE event_time BETWEEN '2024-04-04 11:31:10' and '2024-04-04 12:36:50' AND type in (2,4)-- QueryFinish, ExceptionWhileProcessing +GROUP BY normalized_query_hash, host +) +) +WHERE +(rank_by_cnt <= 20 and cnt > 10) +OR (rank_by_duration <= 20 and QueriesDuration > 60) 
+OR (rank_by_real_time <= 20 and RealTime > 60) +OR (rank_by_user_time <= 20 and UserTime > 60) +OR (rank_by_system_time <= 20 and SystemTime > 60) +OR (rank_by_disk_read_time <= 20 and DiskReadTime > 60) +OR (rank_by_disk_write_time <= 20 and DiskWriteTime > 60) +OR (rank_by_network_send_time <= 20 and NetworkSendTime > 60) +OR (rank_by_network_receive_time <= 20 and NetworkReceiveTime > 60) +OR (rank_by_os_io_wait_time <= 20 and OSIOWaitTime > 60) +OR (rank_by_os_cpu_wait_time <= 20 and OSCPUWaitTime > 60) +OR (rank_by_os_cpu_virtual_time <= 20 and OSCPUVirtualTime > 60) +OR (rank_by_network_receive_bytes <= 20 and NetworkReceiveBytes > 500000000) +OR (rank_by_network_send_bytes <= 20 and NetworkSendBytes > 500000000) +OR (rank_by_selected_parts <= 20 and SelectedParts > 1000) +OR (rank_by_selected_ranges <= 20 and SelectedRanges > 1000) +OR (rank_by_selected_marks <= 20 and SelectedMarks > 1000) +OR (rank_by_selected_rows <= 20 and SelectedRows > 1000000) +OR (rank_by_selected_bytes <= 20 and SelectedBytes > 500000000) +OR (rank_by_file_open <= 20 and FileOpen > 1000) +OR (rank_by_zookeeper_transactions <= 20 and ZooKeeperTransactions > 10) +OR (rank_by_os_read_bytes_exclude_page_cache <= 20 and OSReadBytesExcludePageCache > 500000000) +OR (rank_by_os_write_bytes_exclude_page_cache <= 20 and OSWriteBytesExcludePageCache > 500000000) +OR (rank_by_os_read_bytes_include_page_cache <= 20 and OSReadBytesIncludePageCache > 500000000) +OR (rank_by_os_write_chars_include_page_cache <= 20 and OSWriteCharsIncludePageCache > 500000000) +OR (rank_by_memory_usage_q97 <= 20 and MemoryUsageQ97 > 500000000) +OR (rank_by_read_rows <= 20 and ReadRows > 100000) +OR (rank_by_read_bytes <= 20 and ReadBytes > 500000000) +OR (rank_by_written_rows <= 20 and WrittenRows > 100000) +OR (rank_by_written_bytes <= 20 and WrittenBytes > 500000000) +OR (rank_by_result_rows <= 20 and ResultRows > 100000) +OR (rank_by_result_bytes <= 20 and ResultBytes > 100000000) +ORDER BY rank_by_cnt*10 + 
rank_by_duration*10 + rank_by_real_time*10 + rank_by_user_time*10 + rank_by_system_time*10 + rank_by_disk_read_time*10 + rank_by_disk_write_time*5 + rank_by_network_send_time + rank_by_network_receive_time + rank_by_os_io_wait_time + rank_by_os_cpu_wait_time + rank_by_os_cpu_virtual_time*10 + rank_by_network_receive_bytes*8 + rank_by_network_send_bytes*8 + rank_by_selected_parts*5 + rank_by_selected_ranges*5 + rank_by_selected_marks*5 + rank_by_selected_rows*5 + rank_by_selected_bytes*5 + rank_by_file_open*5 + rank_by_zookeeper_transactions*5 + rank_by_os_read_bytes_exclude_page_cache*5 + rank_by_os_write_bytes_exclude_page_cache*5 + rank_by_os_read_bytes_include_page_cache*5 + rank_by_os_write_chars_include_page_cache*5 + rank_by_memory_usage_q97*10 + rank_by_read_rows*10 + rank_by_read_bytes*10 + rank_by_written_rows*8 + rank_by_written_bytes*8 + rank_by_result_rows*8 + rank_by_result_bytes*8 DESC +``` + +## Other resources + +- [Compare query_log for 2 intervals](https://kb.altinity.com/altinity-kb-useful-queries/compare_query_log_for_2_intervals/) +- [Monitoring INSERT Queries](https://clickhouse.com/blog/monitoring-troubleshooting-insert-queries-clickhouse) +- [Monitoring SELECT Queries](https://clickhouse.com/blog/monitoring-troubleshooting-select-queries-clickhouse) +- [SYSTEM TABLES](https://clickhouse.com/blog/clickhouse-debugging-issues-with-system-tables) +- [Know Your Clickhouse](https://azat.sh/presentations/2022-know-your-clickhouse/) + diff --git a/content/en/altinity-kb-useful-queries/remove_empty_partitions_from_rq.md b/content/en/altinity-kb-useful-queries/remove_empty_partitions_from_rq.md new file mode 100644 index 0000000000..47ff72eba9 --- /dev/null +++ b/content/en/altinity-kb-useful-queries/remove_empty_partitions_from_rq.md @@ -0,0 +1,22 @@ +--- +title: "Removing tasks in the replication queue related to empty partitions" +linkTitle: "Removing tasks in the replication queue related to empty partitions" +weight: 100 +description: >- + 
Removing tasks in the replication queue related to empty partitions +--- + +## Removing tasks in the replication queue related to empty partitions + +``` +SELECT 'ALTER TABLE ' || database || '.' || table || ' DROP PARTITION ID \''|| partition_id || '\';' FROM +(SELECT DISTINCT database, table, extract(new_part_name, '^[^_]+') as partition_id FROM clusterAllReplicas('{cluster}', system.replication_queue) ) as rq +LEFT JOIN +(SELECT database, table, partition_id, sum(rows) as rows_count, count() as part_count +FROM clusterAllReplicas('{cluster}', system.parts) +WHERE active GROUP BY database, table, partition_id +) as p +USING (database, table, partition_id) +WHERE p.rows_count = 0 AND p.part_count = 0 +FORMAT TSVRaw; +``` diff --git a/content/en/altinity-kb-useful-queries/remove_unneeded_block_numbers.md b/content/en/altinity-kb-useful-queries/remove_unneeded_block_numbers.md new file mode 100644 index 0000000000..be12b1084e --- /dev/null +++ b/content/en/altinity-kb-useful-queries/remove_unneeded_block_numbers.md @@ -0,0 +1,76 @@ +--- +title: "Remove block numbers from zookeeper for removed partitions" +linkTitle: "Remove block numbers from zookeeper for removed partitions" +weight: 100 +description: >- + +--- + +## Remove block numbers from zookeeper for removed partitions + +```sql +SELECT distinct concat('delete ', zk.block_numbers_path, zk.partition_id) FROM +( + SELECT r.database, r.table, zk.block_numbers_path, zk.partition_id, p.partition_id + FROM + ( + SELECT path as block_numbers_path, name as partition_id + FROM system.zookeeper + WHERE path IN ( + SELECT concat(zookeeper_path, '/block_numbers/') as block_numbers_path + FROM clusterAllReplicas('{cluster}',system.replicas) + ) + ) as zk + LEFT JOIN + ( + SELECT database, table, concat(zookeeper_path, '/block_numbers/') as block_numbers_path + FROM clusterAllReplicas('{cluster}',system.replicas) + ) + as r ON (r.block_numbers_path = zk.block_numbers_path) + LEFT JOIN + ( + SELECT DISTINCT partition_id, 
database, table + FROM clusterAllReplicas('{cluster}',system.parts) + ) + as p ON (p.partition_id = zk.partition_id AND p.database = r.database AND p.table = r.table) + WHERE p.partition_id = '' AND zk.partition_id <> 'all' + ORDER BY r.database, r.table, zk.block_numbers_path, zk.partition_id, p.partition_id +) t +FORMAT TSVRaw; +``` + +## After 24.3 + +``` +WITH + now() - INTERVAL 120 DAY as retain_old_partitions, + replicas AS (SELECT DISTINCT database, table, zookeeper_path || '/block_numbers' AS block_numbers_path FROM system.replicas), + zk_data AS (SELECT DISTINCT name as partition_id, path as block_numbers_path FROM system.zookeeper WHERE path IN (SELECT block_numbers_path FROM replicas) AND mtime < retain_old_partitions AND partition_id <> 'all'), + zk_partitions AS (SELECT DISTINCT database, table, partition_id FROM replicas JOIN zk_data USING block_numbers_path), + partitions AS (SELECT DISTINCT database, table, partition_id FROM system.parts) +SELECT + format('ALTER TABLE `{}`.`{}` {};',database, table, arrayStringConcat( arraySort(groupArray('FORGET PARTITION ID \'' || partition_id || '\'')), ', ')) AS query +FROM zk_partitions +WHERE (database, table, partition_id) NOT IN (SELECT * FROM partitions) +GROUP BY database, table +ORDER BY database, table +FORMAT TSVRaw; +``` + +## After fixing https://github.com/ClickHouse/ClickHouse/issues/72807 + +``` +WITH + now() - INTERVAL 120 DAY as retain_old_partitions, + replicas AS (SELECT DISTINCT database, table, zookeeper_path || '/block_numbers' AS block_numbers_path FROM clusterAllReplicas('{cluster}',system.replicas)), + zk_data AS (SELECT DISTINCT name as partition_id, path as block_numbers_path FROM system.zookeeper WHERE path IN (SELECT block_numbers_path FROM replicas) AND mtime < retain_old_partitions AND partition_id <> 'all'), + zk_partitions AS (SELECT DISTINCT database, table, partition_id FROM replicas JOIN zk_data USING block_numbers_path), + partitions AS (SELECT DISTINCT database, table, 
partition_id FROM clusterAllReplicas('{cluster}',system.parts)) +SELECT + format('ALTER TABLE `{}`.`{}` ON CLUSTER \'{{cluster}}\' {};',database, table, arrayStringConcat( arraySort(groupArray('FORGET PARTITION ID \'' || partition_id || '\'')), ', ')) AS query +FROM zk_partitions +WHERE (database, table, partition_id) NOT IN (SELECT * FROM partitions) +GROUP BY database, table +ORDER BY database, table +FORMAT TSVRaw; +``` diff --git a/content/en/altinity-kb-useful-queries/table-meta-in-zookeeper.md b/content/en/altinity-kb-useful-queries/table-meta-in-zookeeper.md index d418cb52fa..ddcfd873a2 100644 --- a/content/en/altinity-kb-useful-queries/table-meta-in-zookeeper.md +++ b/content/en/altinity-kb-useful-queries/table-meta-in-zookeeper.md @@ -10,7 +10,7 @@ description: >- > Metadata on replica is not up to date with common metadata in Zookeeper -``` +```sql SELECT *, if( neighbor(name, -1) == name and name != 'is_active', neighbor(value, -1) == value , 1) as looks_good FROM ( SELECT @@ -38,6 +38,6 @@ ORDER BY vs. 
-``` +```sql SELECT metadata_modification_time, create_table_query FROM system.tables WHERE name = 'test_repl' ``` diff --git a/content/en/altinitycloud/_index.md b/content/en/altinitycloud/_index.md new file mode 100644 index 0000000000..bc7decfc35 --- /dev/null +++ b/content/en/altinitycloud/_index.md @@ -0,0 +1,8 @@ +--- +title: "Altinity Kubernetes Operator for ClickHouse®" +linkTitle: "Altinity Kubernetes Operator for ClickHouse®" +description: > + Redirect page +draft: true +--- + diff --git a/content/en/altinitycloud/altinity-cloud-connections/clickhouseclient.md b/content/en/altinitycloud/altinity-cloud-connections/clickhouseclient.md new file mode 100644 index 0000000000..c8f326ad97 --- /dev/null +++ b/content/en/altinitycloud/altinity-cloud-connections/clickhouseclient.md @@ -0,0 +1,4 @@ +--- +type: redirect +target: https://docs.altinity.com/altinitycloud/altinity-cloud-connections/clickhouseclient +--- diff --git a/content/en/altinitykubernetesoperator/_index.md b/content/en/altinitykubernetesoperator/_index.md new file mode 100644 index 0000000000..a7eb5cdac3 --- /dev/null +++ b/content/en/altinitykubernetesoperator/_index.md @@ -0,0 +1,8 @@ +--- +title: "Altinity Kubernetes Operator Install Guide" +linkTitle: "Altinity Kubernetes Operator Install Guide" +description: > + Redirect page +draft: true +--- + diff --git a/content/en/altinitykubernetesoperator/kubernetesinstallguide/minikubeonlinux.md b/content/en/altinitykubernetesoperator/kubernetesinstallguide/minikubeonlinux.md new file mode 100644 index 0000000000..ad2121513f --- /dev/null +++ b/content/en/altinitykubernetesoperator/kubernetesinstallguide/minikubeonlinux.md @@ -0,0 +1,5 @@ +--- +type: redirect +target: https://docs.altinity.com/altinitykubernetesoperator/kubernetesinstallguide/minikubeonlinux/ +alias: altinitykubernetesoperator/kubernetesinstallguide +--- diff --git a/content/en/clickhouse_training/_index.md b/content/en/clickhouse_training/_index.md new file mode 100644 index 
0000000000..831cc66ddc --- /dev/null +++ b/content/en/clickhouse_training/_index.md @@ -0,0 +1,7 @@ +--- +title: "ClickHouse® Training" +linkTitle: "ClickHouse® Admin Training" +manualLink: https://hubs.la/Q02mylhn0 +weight: 1010 +draft: true +--- diff --git a/content/en/engines/_index.md b/content/en/engines/_index.md index b7dbce2b47..53e16cbb13 100644 --- a/content/en/engines/_index.md +++ b/content/en/engines/_index.md @@ -5,26 +5,28 @@ keywords: - clickhouse engine - clickhouse mergetree description: > - Learn about ClickHouse engines, from MergeTree, Atomic Database to RocksDB. + Learn about ClickHouse® engines, from MergeTree, Atomic Database to RocksDB. weight: 1 --- -Generally: the **main** engine in Clickhouse is called [MergeTree](https://clickhouse.yandex/docs/en/table_engines/mergetree/). It allows to store and process data on one server and feel all the advantages of Clickhouse. Basic usage of MergeTree does not require any special configuration, and you can start using it 'out of the box'. +Generally: the **main** engine in ClickHouse® is called [MergeTree](/engines/mergetree-table-engine-family/). It allows to store and process data on one server and feel all the advantages of ClickHouse. Basic usage of MergeTree does not require any special configuration, and you can start using it 'out of the box'. But one server and one copy of data are not fault-tolerant - something can happen with the server itself, with datacenter availability, etc. So you need to have the replica(s) - i.e. server(s) with the same data and which can 'substitute' the original server at any moment. -To have an extra copy (replica) of your data you need to use [ReplicatedMergeTree](https://clickhouse.yandex/docs/en/table_engines/replication/) engine. It can be used _instead_ of MergeTree engine, and you can always upgrade from MergeTree to ReplicatedMergeTree (and downgrade back) if you need. To use that you need to have ZooKeeper installed and running. 
For tests, you can use one standalone Zookeeper instance, but for production usage, you should have zookeeper ensemble at least of 3 servers. +To have an extra copy (replica) of your data you need to use [ReplicatedMergeTree](/altinity-kb-setup-and-maintenance/altinity-kb-converting-mergetree-to-replicated/) engine. It can be used _instead_ of MergeTree engine, and you can always upgrade from MergeTree to ReplicatedMergeTree (and downgrade back) if you need. To use that you need to have +[ZooKeeper installed](https://docs.altinity.com/operationsguide/clickhouse-zookeeper/zookeeper-installation/) +and running. For tests, you can use one standalone Zookeeper instance, but for production usage, you should have zookeeper ensemble at least of 3 servers. -When you use ReplicatedMergeTree then the inserted data is copied automatically to all the replicas, but all the SELECTs are executed on the single server you have connected to. So you can have 5 replicas of your data, but if you will always connect to one replica - it will not 'share' / 'balance' that traffic automatically between all the replicas, one server will be loaded and the rest will generally do nothing. If you need that balancing of load between multiple replicas - you can use the internal 'loadbalancer' mechanism which is provided by [Distributed](https://clickhouse.yandex/docs/en/table_engines/distributed/) engine of Clickhouse. As an alternative in that scenario you can work without Distributed table, but with some external load balancer that will balance the requests between several replicas according to your specific rules or preferences, or just cluster-aware client which will pick one of the servers for the query time. +When you use ReplicatedMergeTree then the inserted data is copied automatically to all the replicas, but all the SELECTs are executed on the single server you have connected to. 
So you can have 5 replicas of your data, but if you will always connect to one replica - it will not 'share' / 'balance' that traffic automatically between all the replicas, one server will be loaded and the rest will generally do nothing. If you need that balancing of load between multiple replicas - you can use the internal 'loadbalancer' mechanism which is provided by Distributed engine of ClickHouse. As an alternative in that scenario you can work without [Distributed table](/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/distributed-table-cluster/), but with some external load balancer that will balance the requests between several replicas according to your specific rules or preferences, or just cluster-aware client which will pick one of the servers for the query time. -The Distributed engine does not store any data, but it can 'point' to the same ReplicatedMergeTree/MergeTree table on multiple servers. To use Distributed engine you need to configure `` settings in your ClickHouse server config file. +The Distributed engine does not store any data, but it can 'point' to the same ReplicatedMergeTree/MergeTree table on multiple servers. To use Distributed engine you need to configure `` settings in your ClickHouse server config file. -So let's say you have 3 replicas of table `my_replicated_data` with ReplicatedMergeTree engine. You can create a table with Distrtibuted engine called `my_distributed_replicated_data` which will 'point' to all of that 3 servers, and when you will select from that `my_distributed_replicated_data table` the select will be forwarded and executed on one of the replicas. So in that scenario, each replica will get 1/3 of requests (but each request still will be fully executed on one chosen replica). +So let's say you have 3 replicas of table `my_replicated_data` with ReplicatedMergeTree engine. 
You can create a table with Distributed engine called `my_distributed_replicated_data` which will 'point' to all of that 3 servers, and when you will select from that `my_distributed_replicated_data table` the select will be forwarded and executed on one of the replicas. So in that scenario, each replica will get 1/3 of requests (but each request still will be fully executed on one chosen replica). All that is great, and will work well while one copy of your data is fitting on a single physical server, and can be processed by the resources of one server. When you have too much data to be stored/processed on one server - you need to use sharding (it's just a way to split the data into smaller parts). Sharding is the mechanism also provided by Distributed engine. With sharding data is divided into parts (shards) according to some sharding key. You can just use random distribution, so let's say - throw a coin to decide on each of the servers the data should be stored, or you can use some 'smarter' sharding scheme, to make the data connected to the same subject (let's say to the same customer) stored on one server, and to another subject on another. So in that case all the shards should be requested at the same time and later the 'common' result should be calculated. -In ClickHouse each shard works independently and process its' part of data, inside each shard replication can work. And later to query all the shards at the same time and combine the final result - Distributed engine is used. So Distributed work as load balancer inside each shard, and can combine the data coming from different shards together to make the 'common' result. +In ClickHouse each shard works independently and process its part of data, inside each shard replication can work. And later to query all the shards at the same time and combine the final result - Distributed engine is used. 
So Distributed work as load balancer inside each shard, and can combine the data coming from different shards together to make the 'common' result. You can use Distributed table for inserts, in that case, it will pass the data to one of the shards according to the sharding key. Or you can insert to the underlying table on one of the shards bypassing the Distributed table. @@ -33,23 +35,15 @@ You can use Distributed table for inserts, in that case, it will pass the data t 1. start with MergeTree 2. to have several copies of data use ReplicatedMergeTree 3. if your data is too big to fit/ to process on one server - use sharding -4. to balance the load between replicas and to combine the result of selects from different shards - use Distributed table. +4. to balance the load between replicas and to combine the result of selects from different shards - use [Distributed table](/altinity-kb-setup-and-maintenance/altinity-kb-data-migration/distributed-table-cluster/). #### More -Official tutorial clarify that a bit: [https://clickhouse.yandex/tutorial.html](https://clickhouse.yandex/tutorial.html) - -Please check also [@alex-zaitsev](https://github.com/alex-zaitsev) presentation, which also covers that subject: [https://www.youtube.com/watch?v=zbjub8BQPyE](https://www.youtube.com/watch?v=zbjub8BQPyE) +Please check [@alex-zaitsev](https://github.com/alex-zaitsev) presentation, which covers that subject: [https://www.youtube.com/watch?v=zbjub8BQPyE](https://www.youtube.com/watch?v=zbjub8BQPyE) ( Slides are here: [https://yadi.sk/i/iLA5ssAv3NdYGy](https://yadi.sk/i/iLA5ssAv3NdYGy) ) -P.S. Actually you can create replication without Zookeeper and ReplicatedMergeTree, just by using the Distributed table above MergeTree and internal_replication=false cluster setting, but in that case, there will no guarantee that all the replicas will have 100% the same data, so I rather would not recommend that scenario. 
- -[altinity-kb-atomic-database-engine/" ]({{}}) - -[altinity-kb-embeddedrocksdb-and-dictionary.md" ]({{}}) - -[mergetree-table-engine-family/altinity-kb-nulls-in-order-by.md" ]({{}}) +P.S. Actually you can create replication without Zookeeper and ReplicatedMergeTree, just by using the Distributed table above MergeTree and internal_replication=false cluster setting, but in that case, there will be no guarantee that all the replicas will have 100% the same data, so I rather would not recommend that scenario. -[mergetree-table-engine-family/replacingmergetree/altinity-kb-replacingmergetree-does-not-collapse-duplicates.md" ]({{}}) +See also: [ReplacingMergeTree does not collapse duplicates]({{}}) Based on my original answer on github: [https://github.com/ClickHouse/ClickHouse/issues/2161](https://github.com/ClickHouse/ClickHouse/issues/2161) diff --git a/content/en/engines/altinity-kb-atomic-database-engine/_index.md b/content/en/engines/altinity-kb-atomic-database-engine/_index.md index d7d3cc5728..a5f8621229 100644 --- a/content/en/engines/altinity-kb-atomic-database-engine/_index.md +++ b/content/en/engines/altinity-kb-atomic-database-engine/_index.md @@ -1,16 +1,21 @@ --- -title: "Atomic Database Engine" +title: "ClickHouse® Atomic Database Engine" linkTitle: "Atomic Database Engine" description: > - Atomic Database Engine + Capabilities of the Atomic database engine +keywords: + - clickhouse atomic + - clickhouse atomic engine +aliases: + /engines/altinity-kb-atomic-database-engine/altinity-kb-implementation-details --- -In version 20.5 ClickHouse first introduced database engine=Atomic. +In version 20.5, ClickHouse® first introduced `database engine=Atomic`. Since version 20.10 it is a default database engine (before engine=Ordinary was used). Those 2 database engine differs in a way how they store data on a filesystem, and engine Atomic allows to resolve some of the issues existed in engine=Ordinary. 
-engine=Atomic supports +`engine=Atomic` supports * non-blocking drop table / rename table * tables delete (&detach) async (wait for selects finish but invisible for new selects) @@ -35,9 +40,9 @@ Also, you can decrease the delay used by Atomic for real table drop (it’s 8 mi ```bash cat /etc/clickhouse-server/config.d/database_atomic_delay_before_drop_table.xml - + 1 - + ``` ### **Q. I cannot reuse zookeeper path after dropping the table.** @@ -66,9 +71,9 @@ SHOW CREATE TABLE xxx; /* or SELECT create_table_query FROM system.tables WHERE ### Q. Should I use Atomic or Ordinary for new setups? -All things inside clickhouse itself should work smoothly with `Atomic`. +All things inside ClickHouse itself should work smoothly with `Atomic`. -But some external tools - backup tools, things involving other kinds of direct manipulations with clickhouse files & folders may have issues with `Atomic`. +But some external tools - backup tools, things involving other kinds of direct manipulations with ClickHouse files & folders may have issues with `Atomic`. `Ordinary` layout on the filesystem is simpler. And the issues which address Atomic (lock-free renames, drops, atomic exchange of table) are not so critical in most cases. @@ -88,9 +93,9 @@ But some external tools - backup tools, things involving other kinds of direct m external tool support -
(like clickhouse-backup) +
(like clickhouse-backup) + good / mature good / mature - limited / beta @@ -160,13 +165,13 @@ description: > cat /etc/clickhouse-server/users.d/disable_atomic_database.xml --- - + Ordinary - + ``` ## Other sources diff --git a/content/en/engines/altinity-kb-atomic-database-engine/how-to-convert-ordinary-to-atomic.md b/content/en/engines/altinity-kb-atomic-database-engine/how-to-convert-ordinary-to-atomic.md index f998fa2513..4141282930 100644 --- a/content/en/engines/altinity-kb-atomic-database-engine/how-to-convert-ordinary-to-atomic.md +++ b/content/en/engines/altinity-kb-atomic-database-engine/how-to-convert-ordinary-to-atomic.md @@ -2,52 +2,31 @@ title: "How to Convert Ordinary to Atomic" linkTitle: "How to Convert Ordinary to Atomic" weight: 100 -description: >- - Clickhouse Howto Convert Ordinary to Atomic --- -## Example How to Convert Ordinary to Atomic +## New, official way -```sql -CREATE DATABASE db ENGINE=Ordinary; -CREATE TABLE db.test(A Int64) ENGINE=MergeTree ORDER BY A; - -CREATE MATERIALIZED VIEW db.test_mv(A Int64) -ENGINE=MergeTree ORDER BY A AS SELECT * FROM db.test; - -INSERT INTO db.test SELECT * FROM numbers(1000); - -CREATE DATABASE db_temp ENGINE=Atomic; - -RENAME TABLE db.test TO db_temp.test; -RENAME TABLE db.test_mv TO db_temp.test_mv; - -DROP DATABASE db; -RENAME DATABASE db_temp TO db; - -USE db; -SHOW TABLES; -┌─name───────────────────────────────────────────┐ -│ .inner_id.37db402c-fc46-421d-b7db-402cfc46921d │ -│ test │ -│ test_mv │ -└────────────────────────────────────────────────┘ - -INSERT INTO db.test SELECT * FROM numbers(1000); +* Implemented automatic conversion of database engine from `Ordinary` to `Atomic` (ClickHouse® Server 22.8+). Create empty `convert_ordinary_to_atomic` file in `flags` directory and all `Ordinary` databases will be converted automatically on next server start. 
+* The conversion is not automatic between upgrades, you need to set the flag as explained below: +``` +Warnings: + * Server has databases (for example `test`) with Ordinary engine, which was deprecated. To convert this database to the new Atomic engine, create a flag /var/lib/clickhouse/flags/convert_ordinary_to_atomic and make sure that ClickHouse has write permission for it. +Example: sudo touch '/var/lib/clickhouse/flags/convert_ordinary_to_atomic' && sudo chmod 666 '/var/lib/clickhouse/flags/convert_ordinary_to_atomic' +``` +* Resolves [#39546](https://github.com/ClickHouse/ClickHouse/issues/39546). [#39933](https://github.com/ClickHouse/ClickHouse/pull/39933) ([Alexander Tokmakov](https://github.com/tavplubix)) -SELECT count() FROM test; -┌─count()─┐ -│ 2000 │ -└─────────┘ +* There can be some problems if the `default` database is Ordinary and fails for some reason. You can add: -SELECT count() FROM test_mv; -┌─count()─┐ -│ 2000 │ -└─────────┘ +``` + + 1 + +``` +[More detailed info here](https://github.com/ClickHouse/ClickHouse/blob/f01a285f6091265cfae72bb7fbf3186269804891/src/Interpreters/loadMetadata.cpp#L150) -SHOW CREATE DATABASE db; -┌─statement─────────────────────────┐ -│ CREATE DATABASE db -ENGINE = Atomic │ -└───────────────────────────────────┘ +Don't forget to remove detached parts from all Ordinary databases, or you can get the error: +``` +│ 2025.01.28 11:34:57.510330 [ 7 ] {} Application: Code: 219. DB::Exception: Cannot drop: filesystem error: in remove: Directory not empty ["/var/lib/clickhouse/data/db/"]. Probably data │ +│ base contain some detached tables or metadata leftovers from Ordinary engine. 
If you want to remove all data anyway, try to attach database back and drop it again with enabled force_remove_data_recursively_ │ ``` + diff --git a/content/en/engines/altinity-kb-embeddedrocksdb-and-dictionary.md b/content/en/engines/altinity-kb-embeddedrocksdb-and-dictionary.md index b5b03ba12e..faa4df3d6b 100644 --- a/content/en/engines/altinity-kb-embeddedrocksdb-and-dictionary.md +++ b/content/en/engines/altinity-kb-embeddedrocksdb-and-dictionary.md @@ -4,7 +4,9 @@ linkTitle: "EmbeddedRocksDB & dictionary" description: > EmbeddedRocksDB & dictionary --- -RocksDB is faster than MergeTree on Key/Value queries because MergeTree primary key index is sparse. Probably it's possible to speedup MergeTree by reducing `index_granularity`. +RocksDB is faster than +[MergeTree](/engines/mergetree-table-engine-family/) +on Key/Value queries because MergeTree primary key index is sparse. Probably it's possible to speedup MergeTree by reducing `index_granularity`. NVMe disk is used for the tests. diff --git a/content/en/engines/mergetree-table-engine-family/aggregatingmergetree.md b/content/en/engines/mergetree-table-engine-family/aggregatingmergetree.md index 8fc8924bd8..0cf3acdbbe 100644 --- a/content/en/engines/mergetree-table-engine-family/aggregatingmergetree.md +++ b/content/en/engines/mergetree-table-engine-family/aggregatingmergetree.md @@ -1,10 +1,13 @@ --- -title: "AggregatingMergeTree" +title: "ClickHouse® AggregatingMergeTree" linkTitle: "AggregatingMergeTree" description: > - AggregatingMergeTree + FAQs for storing and merging pre-aggregated data +keywords: + - clickhouse aggregatingmergetree + - aggregatingmergetree --- -Q. What happens with columns which are nor the part of ORDER BY key, nor have the AggregateFunction type? +Q. What happens with columns which are not part of the [ORDER BY](/engines/mergetree-table-engine-family/pick-keys/) key, nor have the AggregateFunction type? A. 
it picks the first value met, (similar to `any`) @@ -96,5 +99,80 @@ FINAL │ 1 │ 2020-01-16 20:57:46 │ 2020-01-16 20:57:51 │ └──────┴─────────────────────┴─────────────────────┘ -1 rows in set. Elapsed: 0.003 sec. +1 rows in set. Elapsed: 0.003 sec. ``` + +## Merge two data streams + +Q. I have 2 Kafka topics from which I am storing events into 2 different tables (A and B) having the same unique ID. I want to create a single table that combines the data in tables A and B into one table C. The problem is that data is received asynchronously and not all the data is available when a row arrives in Table A or vice-versa. + +A. You can use AggregatingMergeTree with Nullable columns and any aggregation function or Non-Nullable column and max aggregation function if it is acceptable for your data. + +``` +CREATE TABLE table_C ( + id Int64, + colA SimpleAggregateFunction(any,Nullable(UInt32)), + colB SimpleAggregateFunction(max, String) +) ENGINE = AggregatingMergeTree() +ORDER BY id; + +CREATE MATERIALIZED VIEW mv_A TO table_C AS +SELECT id,colA FROM Kafka_A; + +CREATE MATERIALIZED VIEW mv_B TO table_C AS +SELECT id,colB FROM Kafka_B; +``` + +Here is a more complicated example (from here https://gist.github.com/den-crane/d03524eadbbce0bafa528101afa8f794) +``` +CREATE TABLE states_raw( + d date, + uid UInt64, + first_name String, + last_name String, + modification_timestamp_mcs DateTime64(3) default now64(3) +) ENGINE = Null; + +CREATE TABLE final_states_by_month( + d date, + uid UInt64, + final_first_name AggregateFunction(argMax, String, DateTime64(3)), + final_last_name AggregateFunction(argMax, String, DateTime64(3))) +ENGINE = AggregatingMergeTree +PARTITION BY toYYYYMM(d) +ORDER BY (uid, d); + +CREATE MATERIALIZED VIEW final_states_by_month_mv TO final_states_by_month AS +SELECT + d, uid, + argMaxState(first_name, if(first_name<>'', modification_timestamp_mcs, toDateTime64(0,3))) AS final_first_name, + argMaxState(last_name, if(last_name<>'', 
modification_timestamp_mcs, toDateTime64(0,3))) AS final_last_name +FROM states_raw +GROUP BY d, uid; + + +insert into states_raw(d,uid,first_name) values (today(), 1, 'Tom'); +insert into states_raw(d,uid,last_name) values (today(), 1, 'Jones'); +insert into states_raw(d,uid,first_name,last_name) values (today(), 2, 'XXX', ''); +insert into states_raw(d,uid,first_name,last_name) values (today(), 2, 'YYY', 'YYY'); + + +select uid, argMaxMerge(final_first_name) first_name, argMaxMerge(final_last_name) last_name +from final_states_by_month group by uid + +┌─uid─┬─first_name─┬─last_name─┐ +│ 2 │ YYY │ YYY │ +│ 1 │ Tom │ Jones │ +└─────┴────────────┴───────────┘ + +optimize table final_states_by_month final; + +select uid, finalizeAggregation(final_first_name) first_name, finalizeAggregation(final_last_name) last_name +from final_states_by_month + +┌─uid─┬─first_name─┬─last_name─┐ +│ 1 │ Tom │ Jones │ +│ 2 │ YYY │ YYY │ +└─────┴────────────┴───────────┘ +``` + diff --git a/content/en/engines/mergetree-table-engine-family/altinity-kb-nulls-in-order-by.md b/content/en/engines/mergetree-table-engine-family/altinity-kb-nulls-in-order-by.md index c78db16b52..b6a9e80fbc 100644 --- a/content/en/engines/mergetree-table-engine-family/altinity-kb-nulls-in-order-by.md +++ b/content/en/engines/mergetree-table-engine-family/altinity-kb-nulls-in-order-by.md @@ -7,7 +7,7 @@ description: > 1) It is NOT RECOMMENDED for a general use 2) Use on your own risk -3) Use latest ClickHouse version if you need that. +3) Use latest ClickHouse® version if you need that. 
```sql CREATE TABLE x diff --git a/content/en/engines/mergetree-table-engine-family/collapsing-vs-replacing.md b/content/en/engines/mergetree-table-engine-family/collapsing-vs-replacing.md index dc71219c67..171a131215 100644 --- a/content/en/engines/mergetree-table-engine-family/collapsing-vs-replacing.md +++ b/content/en/engines/mergetree-table-engine-family/collapsing-vs-replacing.md @@ -2,16 +2,14 @@ title: "CollapsingMergeTree vs ReplacingMergeTree" linkTitle: "CollapsingMergeTree vs ReplacingMergeTree" weight: 100 -description: >- - CollapsingMergeTree vs ReplacingMergeTree. --- ## CollapsingMergeTree vs ReplacingMergeTree -| ReplacingMergeTree | CollapsingMergeTree | -|:-|:-| -| + very easy to use (always replace) | - more complex (accounting-alike, put 'rollback' records to fix something) | -| + you don't need to store the previous state of the row | - you need to the store (somewhere) the previous state of the row, OR extract it from the table itself (point queries is not nice for ClickHouse) | -| - no deletes | + support deletes | +| ReplacingMergeTree | CollapsingMergeTree | +|:----------------------------------------------------------------------------------------------------|:-| +| + very easy to use (always replace) | - more complex (accounting-alike, put 'rollback' records to fix something) | +| + you don't need to store the previous state of the row | - you need to the store (somewhere) the previous state of the row, OR extract it from the table itself (point queries is not nice for ClickHouse®) | +| - no deletes | + support deletes | | - w/o FINAL - you can can always see duplicates, you need always to 'pay' FINAL performance penalty | + properly crafted query can give correct results without final (i.e. 
`sum(amount * sign)` will be correct, no matter of you have duplicated or not) | -| - only `uniq()`-alike things can be calculated in materialied views | + you can do basic counts & sums in materialized views | +| - only `uniq()`-alike things can be calculated in materialized views | + you can do basic counts & sums in materialized views | diff --git a/content/en/engines/mergetree-table-engine-family/merge-performance-final-optimize-by.md b/content/en/engines/mergetree-table-engine-family/merge-performance-final-optimize-by.md index d8910d4287..84be6bb892 100644 --- a/content/en/engines/mergetree-table-engine-family/merge-performance-final-optimize-by.md +++ b/content/en/engines/mergetree-table-engine-family/merge-performance-final-optimize-by.md @@ -1,8 +1,6 @@ --- title: "Merge performance and OPTIMIZE FINAL" linkTitle: "Merge performance and OPTIMIZE FINAL" -description: > - Merge performance and OPTIMIZE FINAL DEDUPLICATE BY expr --- ## Merge Performance @@ -37,9 +35,11 @@ ALTER TABLE test MODIFY SETTING enable_vertical_merge_algorithm = 0 ## OPTIMIZE TABLE example FINAL DEDUPLICATE BY expr -When using deduplicate feature in `OPTIMIZE FINAL`, the question is which row will remain and won't be deduped? +When using +[deduplicate](/altinity-kb-schema-design/row-level-deduplication/) +feature in `OPTIMIZE FINAL`, the question is which row will remain and won't be deduped? -For SELECT operations Clickhouse does not guarantee the order of the resultset unless you specify ORDER BY. This random ordering is affected by different parameters, like for example `max_threads`. +For SELECT operations ClickHouse® does not guarantee the order of the resultset unless you specify ORDER BY. This random ordering is affected by different parameters, like for example `max_threads`. 
In a merge operation ClickHouse reads rows sequentially in storage order, which is determined by ORDER BY specified in CREATE TABLE statement, and only the first unique row in that order survives deduplication. So it is a bit different from how SELECT actually works. As FINAL clause is used then ClickHouse will merge all rows across all partitions (If it is not specified then the merge operation will be done per partition), and so the first unique row of the first partition will survive deduplication. Merges are single-threaded because it is too complicated to apply merge ops in-parallel, and it generally makes no sense. diff --git a/content/en/engines/mergetree-table-engine-family/part-naming-and-mvcc.md b/content/en/engines/mergetree-table-engine-family/part-naming-and-mvcc.md index 3b3d4a4db4..27e0abbca8 100644 --- a/content/en/engines/mergetree-table-engine-family/part-naming-and-mvcc.md +++ b/content/en/engines/mergetree-table-engine-family/part-naming-and-mvcc.md @@ -50,7 +50,7 @@ As you can see every insert creates a new incremental block_number which is writ Those block numbering works in the scope of partition (for Replicated table) or globally across all partition (for plain MergeTree table). -ClickHouse always merge only continuous blocks . And new part names always refer to the minimum and maximum block numbers. +ClickHouse® always merge only continuous blocks . And new part names always refer to the minimum and maximum block numbers. ``` OPTIMIZE TABLE part_names; @@ -63,7 +63,7 @@ OPTIMIZE TABLE part_names; As you can see here - three parts (with block number 1,2,3) were merged and they formed the new part with name 1_3 as min/max block size. Level get incremented. 
-Now even while previous (merged) parts still exists in filesystem for a while (as inactive) clickhouse is smart enough to understand +Now even while previous (merged) parts still exists in filesystem for a while (as inactive) ClickHouse is smart enough to understand that new part 'covers' same range of blocks as 3 parts of the prev 'generation' There might be a fifth section in the part name, data version. diff --git a/content/en/engines/mergetree-table-engine-family/pick-keys.md b/content/en/engines/mergetree-table-engine-family/pick-keys.md index 3a80c8b9e6..14dc412632 100644 --- a/content/en/engines/mergetree-table-engine-family/pick-keys.md +++ b/content/en/engines/mergetree-table-engine-family/pick-keys.md @@ -1,26 +1,29 @@ --- -title: "How to pick an ORDER BY / PRIMARY KEY / PARTITION BY for the MergeTree-family table" -linkTitle: "Proper ordering and partitioning the MergeTree tables" +title: "How to pick an ORDER BY / PRIMARY KEY / PARTITION BY for the MergeTree family table" +linkTitle: "Properly ordering and partitioning MergeTree tables" +keywords: + - order by clickhouse + - clickhouse partition by weight: 100 description: >- - How to pick an ORDER BY / PRIMARY KEY / PARTITION BY for the MergeTree table. + Optimizing ClickHouse® MergeTree tables --- -## How to pick an ORDER BY / PRIMARY KEY - -Good `order by` usually have 3 to 5 columns, from lowest cardinal on the left (and the most important for filtering) to highest cardinal (and less important for filtering). +Good `order by` usually has 3 to 5 columns, from lowest cardinal on the left (and the most important for filtering) to highest cardinal (and less important for filtering). -Practical approach to create an good ORDER BY for a table: +Practical approach to create a good ORDER BY for a table: 1. Pick the columns you use in filtering always -2. The most important for filtering and the lowest cardinal should be the left-most. Typically it's something like `tenant_id` -3. 
Next column is more cardinal, less important. It can be rounded time sometimes, or `site_id`, or `source_id`, or `group_id` or something similar. -4. repeat p.3 once again (or few times) -5. if you added already all columns important for filtering and you still not addressing a single row with you pk - you can add more columns which can help to put similar records close to each other (to improve the compression) -6. if you have something like hierarchy / tree-like relations between the columns - put there the records from 'root' to 'leaves' for example (continent, country, cityname). This way clickhouse can do lookup by country / city even if continent is not specified (it will just 'check all continents') +2. The most important for filtering and the lowest cardinal should be the left-most. Typically, it's something like `tenant_id` +3. Next column is more cardinal, less important. It can be a rounded time sometimes, or `site_id`, or `source_id`, or `group_id` or something similar. +4. Repeat step 3 once again (or a few times) +5. If you already added all columns important for filtering and you're still not addressing a single row with your pk - you can add more columns which can help to put similar records close to each other (to improve the compression) +6. If you have something like hierarchy / tree-like relations between the columns - put there the records from 'root' to 'leaves' for example (continent, country, cityname). This way ClickHouse® can do a lookup by country/city even if the continent is not specified (it will just 'check all continents') special variants of MergeTree may require special ORDER BY to make the record unique etc. +7. For [timeseries](https://altinity.com/blog/2019-5-23-handling-variable-time-series-efficiently-in-clickhouse), it usually makes sense to put the timestamp as the latest column in ORDER BY, which helps with putting the same data nearby for better locality. 
There are only 2 major patterns for timestamps in ORDER BY: (..., toStartOf(Day|Hour|...)(timestamp), ..., timestamp) and (..., timestamp). The first one is useful when you often query a small part of a table partition. (table partitioned by months, and you read only 1-4 days 90% of the time). +8. There are exceptions to the rule "low cordinality - first" related to compression ratio. For example, data with a lot of repeated attributes in rows (like clickstream), ordering by session_id will benefit compression and reduce disk read, while setting a low cardinality column (like event type) in the first place makes compression and overall query time worse. -Some examples or good order by +Some examples of good `ORDER BY`: ``` ORDER BY (tenantid, site_id, utm_source, clientid, timestamp) ``` @@ -30,13 +33,15 @@ ORDER BY (site_id, toStartOfHour(timestamp), sessionid, timestamp ) PRIMARY KEY (site_id, toStartOfHour(timestamp), sessionid) ``` +(FWIW, the Altinity blog has [a great article on the LowCardinality datatype](https://altinity.com/blog/2019-3-27-low-cardinality).) + ### For Summing / Aggregating All dimensions go to ORDER BY, all metrics - outside of that. -The most important for filtering columns with the lowest cardinality should be the left most. +The most important for filtering columns with the lowest cardinality should be the left-most. -If number of dimensions is high it's typically make sense to use a prefix of ORDER BY as a PRIMARY KEY to avoid polluting sparse index. +If the number of dimensions is high, it typically makes sense to use a prefix of ORDER BY as a PRIMARY KEY to avoid polluting the sparse index. 
Examples: @@ -58,13 +63,152 @@ ORDER BY (tenantid, site_id, eventid) -- utm_source is mutable, while tenantid, PRIMARY KEY (tenantid, site_id) -- eventid is not used for filtering, needed only for collapsing duplicates ``` +Also read about LIGHT ORDER BY for speeding FINAL queries - https://kb.altinity.com/altinity-kb-queries-and-syntax/altinity-kb-final-clause-speed/#light-order-by + +### ORDER BY example + + +```sql +-- col1: high Cardinality +-- col2: low cardinality + +CREATE TABLE tests.order_test +( + `col1` DateTime, + `col2` UInt8 +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(col1) +ORDER BY (col1, col2) +-- +SELECT count() +┌───count()─┐ +│ 126371225 │ +└───────────┘ +``` + +So let’s put the highest cardinal column to the left and the least to the right in the `ORDER BY` definition. This will impact in queries like: + +```sql +SELECT * FROM order_test +WHERE col1 > toDateTime('2020-10-01') +ORDER BY col1, col2 +FORMAT `Null` +``` + +Here for the filtering it will use the skipping index to select the parts `WHERE col1 > xxx` and the result won't be need to be ordered because the `ORDER BY` in the query aligns with the `ORDER BY` in the table and the data is already ordered in disk. (FWIW, Alexander Zaitsev and Mikhail Filimonov wrote [a great post on skipping indexes and how they work](https://altinity.com/blog/clickhouse-black-magic-skipping-indices) for the Altinity blog.) 
+ +```bash +executeQuery: (from [::ffff:192.168.11.171]:39428, user: admin) SELECT * FROM order_test WHERE col1 > toDateTime('2020-10-01') ORDER BY col1,col2 FORMAT Null; (stage: Complete) +ContextAccess (admin): Access granted: SELECT(col1, col2) ON tests.order_test +ContextAccess (admin): Access granted: SELECT(col1, col2) ON tests.order_test +InterpreterSelectQuery: FetchColumns -> Complete +tests.order_test (SelectExecutor): Key condition: (column 0 in [1601503201, +Inf)) +tests.order_test (SelectExecutor): MinMax index condition: (column 0 in [1601503201, +Inf)) +tests.order_test (SelectExecutor): Running binary search on index range for part 202010_367_545_8 (7612 marks) +tests.order_test (SelectExecutor): Running binary search on index range for part 202010_549_729_12 (37 marks) +tests.order_test (SelectExecutor): Running binary search on index range for part 202011_689_719_2 (1403 marks) +tests.order_test (SelectExecutor): Running binary search on index range for part 202012_550_730_12 (3 marks) +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 37 +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 3 +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 1403 +tests.order_test (SelectExecutor): Found continuous range in 11 steps +tests.order_test (SelectExecutor): Found continuous range in 3 steps +tests.order_test (SelectExecutor): Running binary search on index range for part 202011_728_728_0 (84 marks) +tests.order_test (SelectExecutor): Found continuous range in 21 steps +tests.order_test (SelectExecutor): Running binary search on index range for part 202011_725_725_0 (128 marks) +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 
+tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 84 +tests.order_test (SelectExecutor): Running binary search on index range for part 202011_722_722_0 (128 marks) +tests.order_test (SelectExecutor): Found continuous range in 13 steps +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 128 +tests.order_test (SelectExecutor): Found continuous range in 14 steps +tests.order_test (SelectExecutor): Running binary search on index range for part 202011_370_686_19 (5993 marks) +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 5993 +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found continuous range in 25 steps +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 128 +tests.order_test (SelectExecutor): Found continuous range in 14 steps +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 7612 +tests.order_test (SelectExecutor): Found continuous range in 25 steps +tests.order_test (SelectExecutor): Selected 8/9 parts by partition key, 8 parts by primary key, 15380/15380 marks by primary key, 15380 marks to read from 8 ranges +Ok. + +0 rows in set. Elapsed: 0.649 sec. Processed 125.97 million rows, 629.86 MB (194.17 million rows/s., 970.84 MB/s.) +``` + +If we change the `ORDER BY` expression in the query, ClickHouse will need to retrieve the rows and reorder them: + +```sql +SELECT * FROM order_test +WHERE col1 > toDateTime('2020-10-01') +ORDER BY col2, col1 +FORMAT `Null` +``` + +As seen In the `MergingSortedTransform` message, the ORDER BY in the table definition is not aligned with the ORDER BY in the query, so ClickHouse has to reorder the resultset. 
+ +```bash +executeQuery: (from [::ffff:192.168.11.171]:39428, user: admin) SELECT * FROM order_test WHERE col1 > toDateTime('2020-10-01') ORDER BY col2,col1 FORMAT Null; (stage: Complete) +ContextAccess (admin): Access granted: SELECT(col1, col2) ON tests.order_test +ContextAccess (admin): Access granted: SELECT(col1, col2) ON tests.order_test +InterpreterSelectQuery: FetchColumns -> Complete +tests.order_test (SelectExecutor): Key condition: (column 0 in [1601503201, +Inf)) +tests.order_test (SelectExecutor): MinMax index condition: (column 0 in [1601503201, +Inf)) +tests.order_test (SelectExecutor): Running binary search on index range for part 202010_367_545_8 (7612 marks) +tests.order_test (SelectExecutor): Running binary search on index range for part 202012_550_730_12 (3 marks) +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Running binary search on index range for part 202011_725_725_0 (128 marks) +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 3 +tests.order_test (SelectExecutor): Running binary search on index range for part 202011_689_719_2 (1403 marks) +tests.order_test (SelectExecutor): Running binary search on index range for part 202010_549_729_12 (37 marks) +tests.order_test (SelectExecutor): Running binary search on index range for part 202011_728_728_0 (84 marks) +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found continuous range in 3 steps +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Running binary search on index range for part 202011_722_722_0 (128 marks) +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 7612 +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 37 +tests.order_test (SelectExecutor): Found 
(LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found continuous range in 11 steps +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 1403 +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 84 +tests.order_test (SelectExecutor): Found continuous range in 25 steps +tests.order_test (SelectExecutor): Running binary search on index range for part 202011_370_686_19 (5993 marks) +tests.order_test (SelectExecutor): Found continuous range in 21 steps +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 128 +tests.order_test (SelectExecutor): Found continuous range in 13 steps +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found continuous range in 14 steps +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 128 +tests.order_test (SelectExecutor): Found (LEFT) boundary mark: 0 +tests.order_test (SelectExecutor): Found continuous range in 14 steps +tests.order_test (SelectExecutor): Found (RIGHT) boundary mark: 5993 +tests.order_test (SelectExecutor): Found continuous range in 25 steps +tests.order_test (SelectExecutor): Selected 8/9 parts by partition key, 8 parts by primary key, 15380/15380 marks by primary key, 15380 marks to read from 8 ranges +tests.order_test (SelectExecutor): MergingSortedTransform: Merge sorted 1947 blocks, 125972070 rows in 1.423973879 sec., 88465155.05499662 rows/sec., 423.78 MiB/sec +Ok. + +0 rows in set. Elapsed: 1.424 sec. Processed 125.97 million rows, 629.86 MB (88.46 million rows/s., 442.28 MB/s.) +``` + ## PARTITION BY +Things to consider: + * Good size for single partition is something like 1-300Gb. * For Summing/Replacing a bit smaller (400Mb-40Gb) * Better to avoid touching more that few dozens of partitions with typical SELECT query. * Single insert should bring data to one or few partitions. -* The number of partitons in table - dozen or hundreds, not thousands. 
The number of partitions in a table - dozens or hundreds, not thousands.
+ +## General Operations + +### Engine Parameters + +``` +Engine = ReplacingMergeTree([version_column],[is_deleted_column]) +ORDER BY +``` + +* **ORDER BY** -- The ORDER BY defines the columns that need to be unique at merge time. Since merge time can not be decided most of the time, the FINAL keyword is required to remove duplicates. +* **version_column** -- An monotonically increasing number, which can be based on a timestamp. Used for make sure sure updates are executed in a right order. +* **is_deleted_column** (23.2+ see https://github.com/ClickHouse/ClickHouse/pull/41005) -- the column used to delete rows. + +### DML operations + +* CREATE -- ```INSERT INTO t values(..)``` +* READ -- ```SELECT FROM t final``` +* UPDATE -- ```INSERT INTO t(..., _version) values (...)```, insert with incremented version +* DELETE -- ```INSERT INTO t(..., _version, is_deleted) values(..., 1)``` + +### FINAL + +ClickHouse does not guarantee that merge will fire and replace rows using ReplacingMergeTree logic. ```FINAL``` keyword should be used in order to apply merge in a query time. It works reasonably fast when PK filter is used, but maybe slow for ```SELECT *``` type of queries: + +See these links for reference: +* [FINAL clause speed](../../../altinity-kb-queries-and-syntax/altinity-kb-final-clause-speed/) +* [Handling Real-Time Updates in ClickHouse](https://altinity.com/blog/2020/4/14/handling-real-time-updates-in-clickhouse) + +Since 23.2, profile level ```final=1``` can force final automatically, see https://github.com/ClickHouse/ClickHouse/pull/40945 + +ClickHouse merge parts only in scope of single partition, so if two rows with the same replacing key would land in different partitions, they would **never** be merged in single row. FINAL keyword works in other way, it merge all rows across all partitions. But that behavior can be changed via`do_not_merge_across_partitions_select_final` setting. 
+ +```sql +CREATE TABLE repl_tbl_part +( + `key` UInt32, + `value` UInt32, + `part_key` UInt32 +) +ENGINE = ReplacingMergeTree +PARTITION BY part_key +ORDER BY key; + +INSERT INTO repl_tbl_part SELECT + 1 AS key, + number AS value, + number % 2 AS part_key +FROM numbers(4) +SETTINGS optimize_on_insert = 0; + +SELECT * FROM repl_tbl_part; + +┌─key─┬─value─┬─part_key─┐ +│ 1 │ 1 │ 1 │ +│ 1 │ 3 │ 1 │ +└─────┴───────┴──────────┘ +┌─key─┬─value─┬─part_key─┐ +│ 1 │ 0 │ 0 │ +│ 1 │ 2 │ 0 │ +└─────┴───────┴──────────┘ + +SELECT * FROM repl_tbl_part FINAL; + +┌─key─┬─value─┬─part_key─┐ +│ 1 │ 3 │ 1 │ +└─────┴───────┴──────────┘ + +SELECT * FROM repl_tbl_part FINAL SETTINGS do_not_merge_across_partitions_select_final=1; + +┌─key─┬─value─┬─part_key─┐ +│ 1 │ 3 │ 1 │ +└─────┴───────┴──────────┘ +┌─key─┬─value─┬─part_key─┐ +│ 1 │ 2 │ 0 │ +└─────┴───────┴──────────┘ + +OPTIMIZE TABLE repl_tbl_part FINAL; + +SELECT * FROM repl_tbl_part; + +┌─key─┬─value─┬─part_key─┐ +│ 1 │ 3 │ 1 │ +└─────┴───────┴──────────┘ +┌─key─┬─value─┬─part_key─┐ +│ 1 │ 2 │ 0 │ +└─────┴───────┴──────────┘ +``` + +### Deleting the data + +* Delete in partition: ```ALTER TABLE t DELETE WHERE ... in PARTITION 'partition'``` -- slow and asynchronous, rebuilds the partition +* Filter is_deleted in queries: ```SELECT ... WHERE is_deleted = 0``` +* Before 23.2, use ROW POLICY to apply a filter automatically: ``` CREATE ROW POLICY delete_masking on t using is_deleted = 0 for ALL;``` +* 23.2+ ```ReplacingMergeTree(version, is_deleted) ORDER BY .. 
SETTINGS clean_deleted_rows='Always'``` (see https://github.com/ClickHouse/ClickHouse/pull/41005) + +Other options: +* Partition operations: ```ALTER TABLE t DROP PARTITION 'partition'``` -- locks the table, drops full partition only +* Lightweight delete: ```DELETE FROM t WHERE ...``` -- experimental + +## Use cases + ### Last state +Tested on ClickHouse 23.6 version +FINAL is good in all cases + ```sql CREATE TABLE repl_tbl ( @@ -30,7 +139,7 @@ INSERT INTO repl_tbl SELECT number as key, rand() as val_1, randomStringUTF8(10) SELECT count() FROM repl_tbl ┌──count()─┐ -│ 50000000 │ +│ 40000000 │ └──────────┘ ``` @@ -39,19 +148,19 @@ SELECT count() FROM repl_tbl ```sql -- GROUP BY SELECT key, argMax(val_1, ts) as val_1, argMax(val_2, ts) as val_2, argMax(val_3, ts) as val_3, argMax(val_4, ts) as val_4, argMax(val_5, ts) as val_5, max(ts) FROM repl_tbl WHERE key = 10 GROUP BY key; -1 rows in set. Elapsed: 0.017 sec. Processed 40.96 thousand rows, 5.24 MB (2.44 million rows/s., 312.31 MB/s.) +1 row in set. Elapsed: 0.008 sec. -- ORDER BY LIMIT BY SELECT * FROM repl_tbl WHERE key = 10 ORDER BY ts DESC LIMIT 1 BY key ; -1 rows in set. Elapsed: 0.017 sec. Processed 40.96 thousand rows, 5.24 MB (2.39 million rows/s., 305.41 MB/s.) +1 row in set. Elapsed: 0.006 sec. -- Subquery SELECT * FROM repl_tbl WHERE key = 10 AND ts = (SELECT max(ts) FROM repl_tbl WHERE key = 10); -1 rows in set. Elapsed: 0.019 sec. Processed 40.96 thousand rows, 1.18 MB (2.20 million rows/s., 63.47 MB/s.) +1 row in set. Elapsed: 0.009 sec. -- FINAL SELECT * FROM repl_tbl FINAL WHERE key = 10; -1 rows in set. Elapsed: 0.021 sec. Processed 40.96 thousand rows, 5.24 MB (1.93 million rows/s., 247.63 MB/s.) +1 row in set. Elapsed: 0.008 sec. 
``` #### Multiple keys @@ -59,31 +168,31 @@ SELECT * FROM repl_tbl FINAL WHERE key = 10; ```sql -- GROUP BY SELECT key, argMax(val_1, ts) as val_1, argMax(val_2, ts) as val_2, argMax(val_3, ts) as val_3, argMax(val_4, ts) as val_4, argMax(val_5, ts) as val_5, max(ts) FROM repl_tbl WHERE key IN (SELECT toUInt32(number) FROM numbers(1000000) WHERE number % 100) GROUP BY key FORMAT Null; -Peak memory usage (for query): 2.31 GiB. -0 rows in set. Elapsed: 3.264 sec. Processed 5.04 million rows, 645.01 MB (1.54 million rows/s., 197.60 MB/s.) +Peak memory usage (for query): 2.19 GiB. +0 rows in set. Elapsed: 1.043 sec. Processed 5.08 million rows, 524.38 MB (4.87 million rows/s., 502.64 MB/s.) --- set optimize_aggregation_in_order=1; -Peak memory usage (for query): 1.11 GiB. -0 rows in set. Elapsed: 1.772 sec. Processed 2.74 million rows, 350.30 MB (1.54 million rows/s., 197.73 MB/s.) +-- SET optimize_aggregation_in_order=1; +Peak memory usage (for query): 349.94 MiB. +0 rows in set. Elapsed: 0.901 sec. Processed 4.94 million rows, 506.55 MB (5.48 million rows/s., 562.17 MB/s.) -- ORDER BY LIMIT BY SELECT * FROM repl_tbl WHERE key IN (SELECT toUInt32(number) FROM numbers(1000000) WHERE number % 100) ORDER BY ts DESC LIMIT 1 BY key FORMAT Null; -Peak memory usage (for query): 1.08 GiB. -0 rows in set. Elapsed: 2.429 sec. Processed 5.04 million rows, 645.01 MB (2.07 million rows/s., 265.58 MB/s.) +Peak memory usage (for query): 1.12 GiB. +0 rows in set. Elapsed: 1.171 sec. Processed 5.08 million rows, 524.38 MB (4.34 million rows/s., 447.95 MB/s.) -- Subquery SELECT * FROM repl_tbl WHERE (key, ts) IN (SELECT key, max(ts) FROM repl_tbl WHERE key IN (SELECT toUInt32(number) FROM numbers(1000000) WHERE number % 100) GROUP BY key) FORMAT Null; -Peak memory usage (for query): 432.57 MiB. -0 rows in set. Elapsed: 0.939 sec. Processed 5.04 million rows, 160.33 MB (5.36 million rows/s., 170.69 MB/s.) +Peak memory usage (for query): 197.30 MiB. +0 rows in set. Elapsed: 0.484 sec. 
Processed 8.72 million rows, 507.33 MB (18.04 million rows/s., 1.05 GB/s.) --- set optimize_aggregation_in_order=1; -Peak memory usage (for query): 202.88 MiB. -0 rows in set. Elapsed: 0.824 sec. Processed 5.04 million rows, 160.33 MB (6.11 million rows/s., 194.58 MB/s.) +-- SET optimize_aggregation_in_order=1; +Peak memory usage (for query): 171.93 MiB. +0 rows in set. Elapsed: 0.465 sec. Processed 8.59 million rows, 490.55 MB (18.46 million rows/s., 1.05 GB/s.) -- FINAL SELECT * FROM repl_tbl FINAL WHERE key IN (SELECT toUInt32(number) FROM numbers(1000000) WHERE number % 100) FORMAT Null; -Peak memory usage (for query): 198.32 MiB. -0 rows in set. Elapsed: 1.211 sec. Processed 5.04 million rows, 645.01 MB (4.16 million rows/s., 532.57 MB/s.) +Peak memory usage (for query): 537.13 MiB. +0 rows in set. Elapsed: 0.357 sec. Processed 4.39 million rows, 436.28 MB (12.28 million rows/s., 1.22 GB/s.) ``` #### Full table @@ -91,93 +200,30 @@ Peak memory usage (for query): 198.32 MiB. ```sql -- GROUP BY SELECT key, argMax(val_1, ts) as val_1, argMax(val_2, ts) as val_2, argMax(val_3, ts) as val_3, argMax(val_4, ts) as val_4, argMax(val_5, ts) as val_5, max(ts) FROM repl_tbl GROUP BY key FORMAT Null; -Peak memory usage (for query): 15.02 GiB. -0 rows in set. Elapsed: 19.164 sec. Processed 50.00 million rows, 6.40 GB (2.61 million rows/s., 334.02 MB/s.) +Peak memory usage (for query): 16.08 GiB. +0 rows in set. Elapsed: 11.600 sec. Processed 40.00 million rows, 5.12 GB (3.45 million rows/s., 441.49 MB/s.) --- set optimize_aggregation_in_order=1; -Peak memory usage (for query): 4.44 GiB. -0 rows in set. Elapsed: 9.700 sec. Processed 21.03 million rows, 2.69 GB (2.17 million rows/s., 277.50 MB/s.) +-- SET optimize_aggregation_in_order=1; +Peak memory usage (for query): 865.76 MiB. +0 rows in set. Elapsed: 9.677 sec. Processed 39.82 million rows, 5.10 GB (4.12 million rows/s., 526.89 MB/s.) 
-- ORDER BY LIMIT BY SELECT * FROM repl_tbl ORDER BY ts DESC LIMIT 1 BY key FORMAT Null; -Peak memory usage (for query): 10.46 GiB. -0 rows in set. Elapsed: 21.264 sec. Processed 50.00 million rows, 6.40 GB (2.35 million rows/s., 301.03 MB/s.) +Peak memory usage (for query): 8.39 GiB. +0 rows in set. Elapsed: 14.489 sec. Processed 40.00 million rows, 5.12 GB (2.76 million rows/s., 353.45 MB/s.) -- Subquery SELECT * FROM repl_tbl WHERE (key, ts) IN (SELECT key, max(ts) FROM repl_tbl GROUP BY key) FORMAT Null; -Peak memory usage (for query): 2.52 GiB. -0 rows in set. Elapsed: 6.891 sec. Processed 50.00 million rows, 1.60 GB (7.26 million rows/s., 232.22 MB/s.) +Peak memory usage (for query): 2.40 GiB. +0 rows in set. Elapsed: 5.225 sec. Processed 79.65 million rows, 5.40 GB (15.24 million rows/s., 1.03 GB/s.) --- set optimize_aggregation_in_order=1; -Peak memory usage (for query): 1.05 GiB. -0 rows in set. Elapsed: 4.427 sec. Processed 50.00 million rows, 1.60 GB (11.29 million rows/s., 361.49 MB/s.) +-- SET optimize_aggregation_in_order=1; +Peak memory usage (for query): 924.39 MiB. +0 rows in set. Elapsed: 4.126 sec. Processed 79.67 million rows, 5.40 GB (19.31 million rows/s., 1.31 GB/s.) -- FINAL SELECT * FROM repl_tbl FINAL FORMAT Null; -Peak memory usage (for query): 838.75 MiB. -0 rows in set. Elapsed: 6.681 sec. Processed 50.00 million rows, 6.40 GB (7.48 million rows/s., 958.18 MB/s.) +Peak memory usage (for query): 834.09 MiB. +0 rows in set. Elapsed: 2.314 sec. Processed 38.80 million rows, 4.97 GB (16.77 million rows/s., 2.15 GB/s.) ``` -### FINAL - -Clickhouse merge parts only in scope of single partition, so if two rows with the same replacing key would land in different partitions, they would **never** be merged in single row. FINAL keyword works in other way, it merge all rows across all partitions. But that behavior can be changed via`do_not_merge_across_partitions_select_final` setting. 
- -https://kb.altinity.com - -[FINAL clause speed](../../../altinity-kb-queries-and-syntax/altinity-kb-final-clause-speed/) - -```sql -CREATE TABLE repl_tbl_part -( - `key` UInt32, - `value` UInt32, - `part_key` UInt32 -) -ENGINE = ReplacingMergeTree -PARTITION BY part_key -ORDER BY key; - -INSERT INTO repl_tbl_part SELECT - 1 AS key, - number AS value, - number % 2 AS part_key -FROM numbers(4) -SETTINGS optimize_on_insert = 0; - -SELECT * FROM repl_tbl_part; - -┌─key─┬─value─┬─part_key─┐ -│ 1 │ 1 │ 1 │ -│ 1 │ 3 │ 1 │ -└─────┴───────┴──────────┘ -┌─key─┬─value─┬─part_key─┐ -│ 1 │ 0 │ 0 │ -│ 1 │ 2 │ 0 │ -└─────┴───────┴──────────┘ - -SELECT * FROM repl_tbl_part FINAL; - -┌─key─┬─value─┬─part_key─┐ -│ 1 │ 3 │ 1 │ -└─────┴───────┴──────────┘ - -SELECT * FROM repl_tbl_part FINAL SETTINGS do_not_merge_across_partitions_select_final=1; - -┌─key─┬─value─┬─part_key─┐ -│ 1 │ 3 │ 1 │ -└─────┴───────┴──────────┘ -┌─key─┬─value─┬─part_key─┐ -│ 1 │ 2 │ 0 │ -└─────┴───────┴──────────┘ - -OPTIMIZE TABLE repl_tbl_part FINAL; - -SELECT * FROM repl_tbl_part; - -┌─key─┬─value─┬─part_key─┐ -│ 1 │ 3 │ 1 │ -└─────┴───────┴──────────┘ -┌─key─┬─value─┬─part_key─┐ -│ 1 │ 2 │ 0 │ -└─────┴───────┴──────────┘ -``` diff --git a/content/en/engines/mergetree-table-engine-family/replacingmergetree/altinity-kb-replacingmergetree-does-not-collapse-duplicates.md b/content/en/engines/mergetree-table-engine-family/replacingmergetree/altinity-kb-replacingmergetree-does-not-collapse-duplicates.md index 6789cd970c..4705ebe5ae 100644 --- a/content/en/engines/mergetree-table-engine-family/replacingmergetree/altinity-kb-replacingmergetree-does-not-collapse-duplicates.md +++ b/content/en/engines/mergetree-table-engine-family/replacingmergetree/altinity-kb-replacingmergetree-does-not-collapse-duplicates.md @@ -4,12 +4,14 @@ linkTitle: "ReplacingMergeTree does not collapse duplicates" description: > ReplacingMergeTree does not collapse duplicates --- -**Hi there, I have a question about replacing merge trees. 
I have set up a Materialized View with ReplacingMergeTree table, but even if I call optimize on it, the parts don't get merged. I filled that table yesterday, nothing happened since then. What should I do?** +**Hi there, I have a question about replacing merge trees. I have set up a +[Materialized View](https://www.youtube.com/watch?v=THDk625DGsQ) +with ReplacingMergeTree table, but even if I call optimize on it, the parts don't get merged. I filled that table yesterday, nothing happened since then. What should I do?** Merges are eventual and may never happen. It depends on the number of inserts that happened after, the number of parts in the partition, size of parts. If the total size of input parts are greater than the maximum part size then they will never be merged. -[https://clickhouse.tech/docs/en/operations/settings/merge-tree-settings/\#max-bytes-to-merge-at-max-space-in-pool](https://clickhouse.tech/docs/en/operations/settings/merge-tree-settings/#max-bytes-to-merge-at-max-space-in-pool) +[https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#max-bytes-to-merge-at-max-space-in-pool](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#max-bytes-to-merge-at-max-space-in-pool) -[https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/replacingmergetree/](https://clickhouse.tech/docs/en/engines/table-engines/mergetree-family/replacingmergetree/) +[https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replacingmergetree](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replacingmergetree) _ReplacingMergeTree is suitable for clearing out duplicate data in the background in order to save space, but it doesn’t guarantee the absence of duplicates._ diff --git a/content/en/engines/mergetree-table-engine-family/skip-index.md b/content/en/engines/mergetree-table-engine-family/skip-index.md index 2a64112c3b..b0910a03c0 100644 --- 
a/content/en/engines/mergetree-table-engine-family/skip-index.md +++ b/content/en/engines/mergetree-table-engine-family/skip-index.md @@ -5,7 +5,9 @@ description: > Skip index --- {{% alert title="Warning" color="warning" %}} -When you are creating skip indexes in non-regular (Replicated)MergeTree tables over non ORDER BY columns. ClickHouse applies index condition on the first step of query execution, so it's possible to get outdated rows. +When you are creating +[skip indexes](https://altinity.com/blog/clickhouse-black-magic-skipping-indices) +in non-regular (Replicated)MergeTree tables over non ORDER BY columns. ClickHouse® applies index condition on the first step of query execution, so it's possible to get outdated rows. {{% /alert %}} ```sql diff --git a/content/en/engines/mergetree-table-engine-family/summingmergetree.md b/content/en/engines/mergetree-table-engine-family/summingmergetree.md index dab7cd29d2..7638a44469 100644 --- a/content/en/engines/mergetree-table-engine-family/summingmergetree.md +++ b/content/en/engines/mergetree-table-engine-family/summingmergetree.md @@ -6,7 +6,9 @@ description: > --- ## Nested structures -In certain conditions it could make sense to collapse one of dimensions to set of arrays. It's usually profitable to do if this dimension is not commonly used in queries. It would reduce amount of rows in aggregated table and speed up queries which doesn't care about this dimension in exchange of aggregation performance by collapsed dimension. +In certain conditions it could make sense to collapse one of dimensions to set of arrays. It's usually profitable to do if this dimension is not commonly used in queries. It would reduce amount of rows in aggregated table and +[speed up queries](https://altinity.com/webinarspage/a-day-in-the-life-of-a-clickhouse-query) +which doesn't care about this dimension in exchange of aggregation performance by collapsed dimension. 
```sql CREATE TABLE traffic diff --git a/content/en/engines/mergetree-table-engine-family/versioned-collapsing-mergetree.md b/content/en/engines/mergetree-table-engine-family/versioned-collapsing-mergetree.md new file mode 100644 index 0000000000..2e89a3b1b1 --- /dev/null +++ b/content/en/engines/mergetree-table-engine-family/versioned-collapsing-mergetree.md @@ -0,0 +1,668 @@ +--- +title: "UPSERT by VersionedCollapsingMergeTree" +linkTitle: "VersionedCollapsingMT" +description: How to aggregate mutating event stream with duplicates +--- + +### Challenges with mutated data + +When you have an incoming event stream with duplicates, updates, and deletes, building a consistent row state inside the ClickHouse® table is a big challenge. + +The UPDATE/DELETE approach in the OLTP world won’t help with OLAP databases tuned to handle big batches. UPDATE/DELETE operations in ClickHouse are executed as “mutations,” rewriting a lot of data and being relatively slow. You can’t run such operations very often, as for OLTP databases. But the UPSERT operation (insert and replace) runs fast with the ReplacingMergeTree Engine. It’s even set as the default mode for INSERT without any special keyword. We can emulate UPDATE (or even DELETE) with the UPSERT operation. + +There are a lot of [blog posts](https://altinity.com/blog/clickhouse-replacingmergetree-explained-the-good-the-bad-and-the-ugly) on how to use ReplacingMergeTree Engine to handle mutated data streams. A properly designed table schema with ReplacingMergeTree Engine is a good instrument for building the DWH Dimensions table. But when maintaining metrics in Fact tables, there are several problems: + +- it’s not possible to use a valuable ClickHouse feature - online aggregation of incoming data by Materialized Views or Projections on top of the ReplacingMT table, because duplicates and updates will not be deduplicated by the engine during inserts, and calculated aggregates (like sum or count) will be incorrect. 
For significant amounts of data, it becomes critical because aggregating raw data during report queries will take too much time. +- unfinished support for DELETEs. While in the newest versions of ClickHouse, it’s possible to add the is_deleted to ReplacingMergeTree parameters, the necessity of manually filtering out deleted rows after FINAL processing makes that feature less useful. +- Mutated data should be localized to the same partition. If the “replacing” row is saved to a partition different from the previous one, the report query will be much slower or produce unexpected results. + +```sql +-- multiple partitions problem +CREATE TABLE RMT +( + `key` Int64, + `someCol` String, + `eventTime` DateTime +) +ENGINE = ReplacingMergeTree() +PARTITION BY toYYYYMM(eventTime) +ORDER BY key; + +INSERT INTO RMT Values (1, 'first', '2024-04-25T10:16:21'); +INSERT INTO RMT Values (1, 'second', '2024-05-02T08:36:59'); + +with merged as (select * from RMT FINAL) +select * from merged +where eventTime < '2024-05-01' +``` + +You will get a row with ‘first’, not an empty set, as one might expect with the FINAL processing of a whole table. + +### Collapsing + +ClickHouse has other table engines, such as CollapsingMergeTree and VersionedCollapsingMergeTree, that can be used even better for UPSERT operation. + +Both work by inserting a “rollback row” to compensate for the previous insert. The difference between CollapsingMergeTree and VersionedCollapsingMergeTree is in the algorithm of collapsing. For Cluster configurations, it’s essential to understand which row came first and who should replace whom. That is why using ReplicatedVersionedCollapsingMergeTree is mandatory for Replicated Clusters. + +When dealing with such complicated data streams, three tasks need to be solved simultaneously: + +- remove duplicates +- process updates and deletes +- calculate correct aggregates + +It’s essential to understand how the collapsing algorithm of VersionedCollapsingMergeTree works.
Quote from the [documentation](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/versionedcollapsingmergetree): + +> When ClickHouse merges data parts, it deletes each pair of rows that have the same primary key and version and different Sign. The order of rows does not matter. +> + +The version column should increase over time. You may use a natural timestamp for that. Random-generated IDs are not suitable for the version column. + +### Replace data in another partition + +Let’s first fix the problem with mutated data in a different partition. + +```sql +CREATE TABLE VCMT +( + key Int64, + someCol String, + eventTime DateTime, + sign Int8 +) +ENGINE = VersionedCollapsingMergeTree(sign,eventTime) +PARTITION BY toYYYYMM(eventTime) +ORDER BY key; + +INSERT INTO VCMT Values (1, 'first', '2024-04-25 10:16:21',1); +INSERT INTO VCMT Values (1, 'first', '2024-04-25 10:16:21',-1), (1, 'second', '2024-05-02 08:36:59',1); + +set do_not_merge_across_partitions_select_final=1; -- for fast FINAL + +select 'no rows after:'; +with merged as + (select * from VCMT FINAL) +select * from merged +where eventTime < '2024-05-01'; +``` + +With VersionedCollapsingMergeTree, we can use more partition strategies, even with columns not tied to the row’s primary key. This could facilitate the creation of faster queries, more convenient TTLs (Time-To-Live), and backups. + +### Row deduplication + +There are several ways to remove duplicates from the event stream. The most effective feature is block deduplication, which occurs when ClickHouse drops incoming blocks with the same checksum (or tag). However, this requires building a smart ingestor capable of saving positions in a transactional manner. + +However, another method is possible: verifying whether a particular row already exists in the destination table to avoid redundant insertions. Together with block deduplication, that method also avoids using ReplacingMergeTree and FINAL during query time.
+ +Ensuring accuracy and consistency in results requires executing this process on a single thread within one cluster node. This method is particularly suitable for less active event streams, such as those with up to 100,000 events per second. To boost performance, incoming streams should be segmented into several partitions (or 'shards') based on the table/event's Primary Key, with each partition processed on a single thread. + +An example of row deduplication: + +```sql +create table Example1 (id Int64, metric UInt64) +engine = MergeTree order by id; + +create table Example1Null engine = Null as Example1; + +create materialized view __Example1 to Example1 as +select * from Example1Null +where id not in ( + select id from Example1 where id in ( + select id from Example1Null + ) +); +``` + +Here is the trick: + +- use Null table and MatView to be able to access both the insert block and the dest table +- check the existence of IDs in the destination table with a fast index scan by a primary key using the IN operator +- filter existing rows from insert block by NOT IN operator + +In most cases, the insert block does not have too many rows (like 1000-100k), so checking the destination table for their existence by scanning the Primary Key (residing in memory) won’t take much time. However, due to the high table index granularity, it can still be noticeable on high load. To enhance performance, consider reducing index granularity to 4096 (from the default 8192) or even fewer values. + +### Getting old row + +To process updates in CollapsingMergeTree, the 'last row state' must be known before inserting the 'compensation row.' Sometimes, this is possible - CDC events coming from MySQL’s binlog or Postgres’s WAL contain not only 'new' data but also 'old' values. If one of the columns includes a sequence-generated version or timestamp of the row’s update time, it can be used as the row’s 'version' for VersionedCollapsingMergeTree. 
When the incoming event stream lacks old metric values and suitable version information, we can retrieve that data by examining the ClickHouse table using the same method used for row deduplication in the previous example. + +```sql +create table Example2 (id Int64, metric UInt64, sign Int8) +engine = CollapsingMergeTree(sign) order by id; + +create table Example2Null engine = Null as Example2; + +create materialized view __Example2 to Example2 as +with _old as ( + select *, arrayJoin([-1,1]) as _sign + from Example2 where id in (select id from Example2Null) + ) +select id, + if(_old._sign=-1, _old.metric, _new.metric) as metric +from Example2Null as _new +join _old using id; +``` + +I read more data from the Example2 table than from Example1. Instead of simply checking the row existence by the IN operator, a JOIN with existing rows is used to build a “compensate row.” + +For UPSERT, the collapsing algorithm requires inserting two rows. So, I need to create two rows from any row that is found in the local table. It’s an essential part of the suggested approach, which allows me to produce proper rows for inserting with human-readable code with clear if() statements. That is why I execute arrayJoin while reading old data. + +Don’t try to run the code above. It’s just a short explanation of the idea, lacking many needed elements.
+ +### UPSERT by Collapsing + +Here is a more realistic [example](https://fiddle.clickhouse.com/babb6069-f629-4f6b-be2c-be51c9f0aa9b) with more checks that can be played with: + +```sql +create table Example3 +( + id Int32, + metric1 UInt32, + metric2 UInt32, + _version UInt64, + sign Int8 default 1 +) engine = VersionedCollapsingMergeTree(sign, _version) +ORDER BY id +; +create table Stage engine=Null as Example3 ; + +create materialized view Example3Transform to Example3 as +with __new as ( SELECT * FROM Stage order by _version desc, sign desc limit 1 by id ), + __old AS ( SELECT *, arrayJoin([-1,1]) AS _sign from + ( select * FROM Example3 final + PREWHERE id IN (SELECT id FROM __new) + where sign = 1 + ) + ) +select id, + if(__old._sign = -1, __old.metric1, __new.metric1) AS metric1, + if(__old._sign = -1, __old.metric2, __new.metric2) AS metric2, + if(__old._sign = -1, __old._version, __new._version) AS _version, + if(__old._sign = -1, -1, 1) AS sign +from __new left join __old +using id +where if(__new.sign=-1, + __old._sign = -1, -- insert only delete row if it's found in old data + __new._version > __old._version -- skip duplicates for updates +); + +-- original +insert into Stage values (1,1,1,1,1), (2,2,2,1,1); +select 'step1',* from Example3 ; + +-- no duplicates (with the same version) inserted +insert into Stage values (1,3,1,1,1),(2,3,2,1,1); +select 'step2',* from Example3 ; + +-- delete a row with id=2. version for delete row does not have any meaning +insert into Stage values (2,2,2,0,-1); +select 'step3',* from Example3 final; + +-- replace a row with id=1. row with sign=-1 not needed, but can be in the insert blocks (will be skipped) +insert into Stage values (1,1,1,0,-1),(1,3,3,2,1); +select 'step4',* from Example3 final; +``` + +Important additions: + +- When multiple events with the same ID and different versions are received in the one insert batch, the most recent event is applied. 
+- “delete rows” with sign=-1 and the wrong version are not used for processing. For the Collapsing algorithm, the delete row version should match the version from the row stored in the local table, not the same version from the replacing row. That’s why I decided to skip such a “delete row” received from the incoming stream and build it from the table’s data. +- using FINAL and PREWHERE (to speed up FINAL) while reading the destination table. PREWHERE filters are applied before FINAL processing, reducing the number of grouped rows. +- filter to skip out-of-order events by checking the version +- DELETE event processing (inside last WHERE) + +### Speed Test + +```sql +set allow_experimental_analyzer=0; +create table Example3 +( + id Int32, + Department String, + metric1 UInt32, + metric2 Float32, + _version UInt64, + sign Int8 default 1 +) engine = VersionedCollapsingMergeTree(sign, _version) + ORDER BY id + partition by (id % 20) +settings index_granularity=4096 +; + +set do_not_merge_across_partitions_select_final=1; + +-- make 100M table +INSERT INTO Example3 +SELECT + number AS id, + ['HR', 'Finance', 'Engineering', 'Sales', 'Marketing'][rand() % 5 + 1] AS Department, + rand() % 1000 AS metric1, + (rand() % 10000) / 100.0 AS metric2, + 0 AS _version, + 1 AS sign +FROM numbers(1E8); + +create function timeSpent as () -> + date_diff('millisecond',(select ts from t1),now64(3)); + +-- measure plain INSERT time for 1M batch +create temporary table t1 (ts DateTime64(3)) as select now64(3); +INSERT INTO Example3 +SELECT + number AS id, + ['HR', 'Finance', 'Engineering', 'Sales', 'Marketing'][rand() % 5 + 1] AS Department, + rand() % 1000 AS metric1, + (rand() % 10000) / 100.0 AS metric2, + 1 AS _version, + 1 AS sign +FROM numbers(1E6); +select '---',timeSpent(),'INSERT'; + +--create table Stage engine=MergeTree order by id as Example3 ; +create table Stage engine=Null as Example3 ; + +create materialized view Example3Transform to Example3 as +with __new as ( SELECT * 
FROM Stage order by _version desc,sign desc limit 1 by id ), + __old AS ( SELECT *, arrayJoin([-1,1]) AS _sign from + ( select * FROM Example3 final + PREWHERE id IN (SELECT id FROM __new) + where sign = 1 + ) + ) +select id, + if(__old._sign = -1, __old.Department, __new.Department) AS + Department, + if(__old._sign = -1, __old.metric1, __new.metric1) AS metric1, + if(__old._sign = -1, __old.metric2, __new.metric2) AS metric2, + if(__old._sign = -1, __old._version, __new._version) AS _version, + if(__old._sign = -1, -1, 1) AS sign +from __new left join __old using id +where if(__new.sign=-1, + __old._sign = -1, -- insert only delete row if it's found in old data + __new._version > __old._version -- skip duplicates for updates + ); + +-- calculate UPSERT time for 1M batch +drop table t1; +create temporary table t1 (ts DateTime64(3)) as select now64(3); +INSERT INTO Stage +SELECT + (rand() % 1E6)*100 AS id, + --number AS id, + ['HR', 'Finance', 'Engineering', 'Sales', 'Marketing'][rand() % 5 + 1] AS Department, + rand() % 1000 AS metric1, + (rand() % 10000) / 100.0 AS metric2, + 2 AS _version, + 1 AS sign +FROM numbers(1E6); + +select '---',timeSpent(),'UPSERT'; + +-- FINAL query +drop table t1; +create temporary table t1 (ts DateTime64(3)) as select now64(3); +select Department, count(), sum(metric1) from Example3 FINAL +group by Department order by Department +format Null +; +select '---',timeSpent(),'FINAL'; + +-- GROUP BY query +drop table t1; +create temporary table t1 (ts DateTime64(3)) as select now64(3); +select Department, sum(sign), sum(sign*metric1) from Example3 +group by Department order by Department +format Null +; +select '---',timeSpent(),'GROUP BY'; + +optimize table Example3 final; +-- FINAL query +drop table t1; +create temporary table t1 (ts DateTime64(3)) as select now64(3); +select Department, count(), sum(metric1) from Example3 FINAL +group by Department order by Department +format Null +; +select '---',timeSpent(),'FINAL OPTIMIZED'; + +-- 
GROUP BY query +drop table t1; +create temporary table t1 (ts DateTime64(3)) as select now64(3); +select Department, sum(sign), sum(sign*metric1) from Example3 +group by Department order by Department +format Null +; +select '---',timeSpent(),'GROUP BY OPTIMIZED'; +``` + +You can use fiddle or `clickhouse-local` to run such a test: + +```bash +cat test.sql | clickhouse-local -nm +``` + +Results (Mac A2 Pro), milliseconds: + +```sql +--- 252 INSERT +--- 1710 UPSERT +--- 763 FINAL +--- 311 GROUP BY +--- 314 FINAL OPTIMIZED +--- 295 GROUP BY OPTIMIZED +``` + +UPSERT is six times slower than direct INSERT because it requires looking up the destination table. That is the price. It is better to use idempotent inserts with an exactly-once delivery guarantee. However, it’s not always possible. + +The FINAL speed is quite good, especially if we split the table by 20 partitions, use `do_not_merge_across_partitions_select_final` setting, and keep most of the table’s partitions optimized (1 part per partition). But we can do it better. + +### Adding projections + +Let's add an aggregating projection, and also add a more useful `updated_at` timestamp instead of an abstract `_version` and replace `String` for Department dimension by LowCardinality(String). Let’s look at the difference in time execution. 
+ +https://fiddle.clickhouse.com/3140d341-ccc5-4f57-8fbf-55dbf4883a21 + +```sql +set allow_experimental_analyzer=0; +create table Example4 +( + id Int32, + Department LowCardinality(String), + metric1 Int32, + metric2 Float32, + _version DateTime64(3) default now64(3), + sign Int8 default 1 +) engine = VersionedCollapsingMergeTree(sign, _version) + ORDER BY id + partition by (id % 20) + settings index_granularity=4096 +; + +set do_not_merge_across_partitions_select_final=1; + +-- make 100M table +INSERT INTO Example4 +SELECT + number AS id, + ['HR', 'Finance', 'Engineering', 'Sales', 'Marketing'][rand() % 5 + 1] AS Department, + rand() % 1000 AS metric1, + (rand() % 10000) / 100.0 AS metric2, + 0 AS _version, + 1 AS sign +FROM numbers(1E8); + +create temporary table timeMark (ts DateTime64(3)); +create function timeSpent as () -> + date_diff('millisecond',(select max(ts) from timeMark),now64(3)); + +-- measure plain INSERT time for 1M batch +insert into timeMark select now64(3); +INSERT INTO Example4(id,Department,metric1,metric2) +SELECT + number AS id, + ['HR', 'Finance', 'Engineering', 'Sales', 'Marketing'][rand() % 5 + 1] AS Department, + rand() % 1000 AS metric1, + (rand() % 10000) / 100.0 AS metric2 +FROM numbers(1E6); +select '---',timeSpent(),'INSERT'; + +--create table Stage engine=MergeTree order by id as Example4 ; +create table Stage engine=Null as Example4 ; + +create materialized view Example4Transform to Example4 as +with __new as ( SELECT * FROM Stage order by _version desc,sign desc limit 1 by id ), + __old AS ( SELECT *, arrayJoin([-1,1]) AS _sign from + ( select * FROM Example4 final + PREWHERE id IN (SELECT id FROM __new) + where sign = 1 + ) + ) +select id, + if(__old._sign = -1, __old.Department, __new.Department) AS + Department, + if(__old._sign = -1, __old.metric1, __new.metric1) AS metric1, + if(__old._sign = -1, __old.metric2, __new.metric2) AS metric2, + if(__old._sign = -1, __old._version, __new._version) AS _version, + if(__old._sign = 
-1, -1, 1) AS sign +from __new left join __old using id +where if(__new.sign=-1, + __old._sign = -1, -- insert only delete row if it's found in old data + __new._version > __old._version -- skip duplicates for updates + ); + +-- calculate UPSERT time for 1M batch +insert into timeMark select now64(3); +INSERT INTO Stage(id,Department,metric1,metric2) +SELECT + (rand() % 1E6)*100 AS id, + --number AS id, + ['HR', 'Finance', 'Engineering', 'Sales', 'Marketing'][rand() % 5 + 1] AS Department, + rand() % 1000 AS metric1, + (rand() % 10000) / 100.0 AS metric2 +FROM numbers(1E6); + +select '---',timeSpent(),'UPSERT'; + +-- FINAL query +insert into timeMark select now64(3); +select Department, count(), sum(metric1) from Example4 FINAL +group by Department order by Department + format Null +; +select '---',timeSpent(),'FINAL'; + +-- GROUP BY query +insert into timeMark select now64(3); +select Department, sum(sign), sum(sign*metric1) from Example4 +group by Department order by Department + format Null +; +select '---',timeSpent(),'GROUP BY'; + +--select '--parts1',partition, count() from system.parts where active and table='Example4' group by partition; + +insert into timeMark select now64(3); +optimize table Example4 final; +select '---',timeSpent(),'OPTIMIZE'; + +-- FINAL OPTIMIZED +insert into timeMark select now64(3); +select Department, count(), sum(metric1) from Example4 FINAL +group by Department order by Department + format Null +; +select '---',timeSpent(),'FINAL OPTIMIZED'; + +-- GROUP BY OPTIMIZED +insert into timeMark select now64(3); +select Department, sum(sign), sum(sign*metric1) from Example4 +group by Department order by Department + format Null +; +select '---',timeSpent(),'GROUP BY OPTIMIZED'; + +-- UPSERT a little data to create more parts +INSERT INTO Stage(id,Department,metric1,metric2) +SELECT + number AS id, + ['HR', 'Finance', 'Engineering', 'Sales', 'Marketing'][rand() % 5 + 1] AS Department, + rand() % 1000 AS metric1, + (rand() % 10000) / 100.0 
AS metric2 +FROM numbers(1000); + +--select '--parts2',partition, count() from system.parts where active and table='Example4' group by partition; + +-- GROUP BY SEMI-OPTIMIZED +insert into timeMark select now64(3); +select Department, sum(sign), sum(sign*metric1) from Example4 +group by Department order by Department + format Null +; +select '---',timeSpent(),'GROUP BY SEMI-OPTIMIZED'; + +--alter table Example4 add column Smetric1 Int32 alias metric1*sign; +alter table Example4 add projection byDep (select Department, sum(sign), sum(sign*metric1) group by Department); + +-- Materialize Projection +insert into timeMark select now64(3); +alter table Example4 materialize projection byDep settings mutations_sync=1; +select '---',timeSpent(),'Materialize Projection'; + +-- GROUP BY query Projected +insert into timeMark select now64(3); +select Department, sum(sign), sum(sign*metric1) from Example4 +group by Department order by Department + settings force_optimize_projection=1 + format Null +; +select '---',timeSpent(),'GROUP BY Projected'; + +``` + +Results (Mac A2 Pro), milliseconds: + +```sql +--- 175 INSERT +--- 1613 UPSERT +--- 329 FINAL +--- 102 GROUP BY +--- 10498 OPTIMIZE +--- 103 FINAL OPTIMIZED +--- 90 GROUP BY OPTIMIZED +--- 94 GROUP BY SEMI-OPTIMIZED +--- 919 Materialize Projection +--- 5 GROUP BY Projected +``` + +Some thoughts: + +- INSERT, UPSERT, and SELECT benefit from switching the Department column to LowCardinality. Fewer reads - faster queries. +- OPTIMIZE is VERY expensive +- FINAL is quite fast (especially for the OPTIMIZED table). You don’t need to OPTIMIZE the table till the 1 part for partition to remove FINAL from the query. Not having too many parts already gives you a performance boost. +- GROUP BY for that task is still faster +- projections building requires resources. Inserts to the table with Projections will be longer. Tune the insert timeouts. +- Query over projection is very fast (as it should be). 
However, it’s not always possible to aggregate data in such a simple way. + +### DELETEs inaccuracy + +The typical CDC event for DWH systems besides INSERT is UPSERT—a new row replaces the old one (with suitable aggregate corrections). But DELETE events are also supported (ones with column sign=-1). The Materialized View described above will correctly process the DELETE event by inserting only 1 row with sign=-1 if a row with a particular ID already exists in the table. In such cases, VersionedCollapsingMergeTree will wipe both rows (with sign=1 & -1) during merge or final operations. + +However, it can lead to incorrect duplicate processing in some rare situations. Here is the scenario: + +- two events happen in the source database (insert and delete) for the very same ID +- only the insert event creates a duplicate (the delete event is not duplicated) +- all 3 events (delete and two inserts) were processed in separate batches +- ClickHouse executes the merge operation very quickly after the first INSERT and DELETE events are received, effectively removing the row with that ID from the table +- the second (duplicated) insert is saved to the table because we lost the information about the first insertion + +The probability of such a sequence is relatively low, especially in normal operations when the amount of DELETEs is not too significant. Processing events in big batches will reduce the probability even more. + +### Combine old and new + +The presented technique can be used to reimplement the AggregatingMergeTree algorithm to combine old and new row data using VersionedCollapsingMergeTree.
+ +https://fiddle.clickhouse.com/e1d7e04c-f1d6-4a25-9aac-1fe2b543c693 + +```sql +create table Example5 +( + id Int32, + metric1 UInt32, + metric2 Nullable(UInt32), + updated_at DateTime64(3) default now64(3), + sign Int8 default 1 +) engine = VersionedCollapsingMergeTree(sign, updated_at) +ORDER BY id +; +create table Stage engine=Null as Example5 ; + +create materialized view Example5Transform to Example5 as +with __new as ( SELECT * FROM Stage order by sign desc, updated_at desc limit 1 by id ), + __old AS ( SELECT *, arrayJoin([-1,1]) AS _sign from + ( select * FROM Example5 final + PREWHERE id IN (SELECT id FROM __new) + where sign = 1 + ) + ) +select id, + if(__old._sign = -1, __old.metric1, greatest(__new.metric1, __old.metric1)) AS metric1, + if(__old._sign = -1, __old.metric2, ifNull(__new.metric2, __old.metric2)) AS metric2, + if(__old._sign = -1, __old.updated_at, __new.updated_at) AS updated_at, + if(__old._sign = -1, -1, 1) AS sign +from __new left join __old using id +where if(__new.sign=-1, + __old._sign = -1, -- insert only delete row if it's found in old data + __new.updated_at > __old.updated_at -- skip duplicates for updates +); + +-- original +insert into Stage(id) values (1), (2); +select 'step0',* from Example5 ; + +insert into Stage(id,metric1) values (1,1), (2,2); +select 'step1',* from Example5 final; + +insert into Stage(id,metric2) values (1,11), (2,12); +select 'step2',* from Example5 final ; +``` + +### Complex Primary Key + +I used a simple, compact column with Int64 type for the primary key in previous examples. It's better to go this route with monotonically growing IDs like autoincrement ID or SnowFlakeId (based on timestamp). However, in some cases, a more complex primary key is needed. For instance, when storing data for multiple tenants (Customers, partners, etc.) in the same table. This is not a problem for the suggested technique - use all the necessary columns in all filters and JOIN operations as Tuple. 
+ +```sql +create table Example6 +( + id Int64, + tenant_id Int32, + metric1 UInt32, + _version UInt64, + sign Int8 default 1 +) engine = VersionedCollapsingMergeTree(sign, _version) +ORDER BY (tenant_id,id) +; +create table Stage engine=Null as Example6 ; + +create materialized view Example6Transform to Example6 as +with __new as ( SELECT * FROM Stage order by sign desc, _version desc limit 1 by tenant_id,id ), + __old AS ( SELECT *, arrayJoin([-1,1]) AS _sign from + ( select * FROM Example6 final + PREWHERE (tenant_id,id) IN (SELECT tenant_id,id FROM __new) + where sign = 1 + ) + ) +select id,tenant_id, + if(__old._sign = -1, __old.metric1, __new.metric1) AS metric1, + if(__old._sign = -1, __old._version, __new._version) AS _version, + if(__old._sign = -1, -1, 1) AS sign +from __new left join __old +using (tenant_id,id) +where if(__new.sign=-1, + __old._sign = -1, -- insert only delete row if it's found in old data + __new._version > __old._version -- skip duplicates for updates +); +``` + +### Sharding + +The suggested approach works well when inserting data in a single thread on a single replica. This is suitable for up to 1M events per second. However, for higher traffic, it's necessary to use multiple ingesting threads across several replicas. In such cases, collisions caused by parts manipulation and replication delay can disrupt the entire Collapsing algorithm. + +But inserting different shards with a sharding key derived from ID works fine. Every shard will operate with its own non-intersecting set of IDs, and don’t interfere with each other. + +The same approach can be implemented when inserting several threads into the same replica node. 
For big installations with high traffic and many shards and replicas, the ingesting app can split the data stream into a considerably large number of “virtual shards” (or partitions in Kafka terminology) and then map the “virtual shards” to the threads doing inserts to “physical shards.” + +The incoming stream could be split into several ones by using an expression like `cityHash64(id) % 50 = 0` as a sharding key. The ingesting app should calculate the shard number before sending data to internal buffers that will be flushed to INSERTs. + +```sql +-- emulate insert into distributed table +INSERT INTO function remote('localhos{t,t,t}',default,Stage,id) +SELECT + (rand() % 1E6)*100 AS id, + --number AS id, + ['HR', 'Finance', 'Engineering', 'Sales', 'Marketing'][rand() % 5 + 1] AS Department, + rand() % 1000 AS metric1, + (rand() % 10000) / 100.0 AS metric2, + 2 AS _version, + 1 AS sign +FROM numbers(1000) +settings prefer_localhost_replica=0; +``` diff --git a/content/en/join_slack/_index.md b/content/en/join_slack/_index.md new file mode 100644 index 0000000000..6bb48ecdd9 --- /dev/null +++ b/content/en/join_slack/_index.md @@ -0,0 +1,8 @@ +--- +title: "Join Slack" +linkTitle: "Join the Community" +manualLink: https://altinity.com/slack +weight: 990 +className: test +draft: true +--- diff --git a/content/en/maintenance_ebook/_index.md b/content/en/maintenance_ebook/_index.md new file mode 100644 index 0000000000..b140564933 --- /dev/null +++ b/content/en/maintenance_ebook/_index.md @@ -0,0 +1,6 @@ +--- +title: "ClickHouse Cluster Maintenance eBook" +linkTitle: "ClickHouse Cluster Maintenance eBook" +manualLink: https://hubs.la/Q03ccp610 +weight: 1200 +--- diff --git a/content/en/upgrade/_index.md b/content/en/upgrade/_index.md index 8b1ba1130d..09c8f43689 100644 --- a/content/en/upgrade/_index.md +++ b/content/en/upgrade/_index.md @@ -1,4 +1,6 @@ --- +aliases: +- 
/altinity-kb-setup-and-maintenance/cluster-production-configuration-guide/version-upgrades/ title: "Upgrade" linkTitle: "Upgrade" keywords: @@ -8,16 +10,48 @@ description: > weight: 10 --- +# ClickHouse® Version Upgrade Procedure + +## Step-by-Step Guide: + Normally the upgrade procedure looks like that: -1) pick the release to upgrade -2) check the release notes/changelog between the release you use currently and the target release -3) sometimes you may need to change some configuration settings to change the defaults (for better compatibility, etc) -4) upgrade itself is simple: - * upgrade package (it doesn't trigger the restart of clickhouse-server automatically) - * restart clickhouse-server - * check healthchecks / logs - * repeat on other nodes -6) Mixing several versions working together in the same cluster may often lead to different degradations. Usually, it's not recommended to have a big delay between upgrading different nodes on the same cluster. Usually, you do upgrade on the odd replicas first, and after they were back online - restart the even replicas. -7) upgrade the dev / staging first -8) ensure your schema/queries work properly on the staging env -9) do the production upgrade. + +1) **Pick the release to upgrade** + - If you upgrade the existing installation with a lot of legacy queries, please pick mature versions with extended lifetime for upgrade (use [Altinity Stable Builds](https://docs.altinity.com/altinitystablebuilds/) or LTS releases from the upstream). +2) **Review Release Notes/Changelog** + - Compare the release notes/changelog between your current release and the target release. 
+ - For Altinity Stable Builds: check the release notes of the release you do upgrade to (if you are going from some older release - you may need to read several of them for every release in between (for example to upgrade from 22.3 to 23.8 you will need to check [22.8](https://docs.altinity.com/releasenotes/altinity-stable-release-notes/22.8/), + [23.3](https://docs.altinity.com/releasenotes/altinity-stable-release-notes/23.3/), + [23.8](https://docs.altinity.com/releasenotes/altinity-stable-release-notes/23.8/) etc.) + - For upstream releases check the [changelog](https://github.com/ClickHouse/ClickHouse/blob/master/CHANGELOG.md) + - Also ensure that no configuration changes are needed. + - Sometimes, you may need to adjust configuration settings for better compatibility. + - or to opt out of some new features you don’t need (maybe needed to make the downgrade path possible, or to make it possible for 2 versions to work together) +3) **Prepare Upgrade Checklist** + - Upgrade the package (note that this does not trigger an automatic restart of the clickhouse-server). + - Restart the clickhouse-server service. + - Check health checks and logs. + - Repeat the process on other nodes. +4) **Prepare “Canary” Update Checklist** + - Mixing several versions in the same cluster can lead to different degradations. It is usually not recommended to have a significant delay between upgrading different nodes in the same cluster. + - (If needed / depends on use case) stop ingestion into odd replicas / remove them for load-balancer etc. + - Perform the upgrade on the odd replicas first. Once they are back online, repeat the same on the even replicas. + - Test and verify that everything works properly. Check for any errors in the log files. +5) **Upgrade Dev/Staging Environment** + - Follow the 3rd and 4th checklists to perform the upgrade of the Dev/Staging environment. + - Ensure your schema/queries work properly in the Dev/staging environment. 
+ - Perform testing before planning the production upgrade. + - It is also worth testing the downgrade (to have a plan B in case of upgrade failure) +6) **Upgrade Production** + - Once the Dev/Staging environment is verified, proceed with the production upgrade. + +> **Note:** Prepare and test downgrade procedures on staging so the server can be returned to the previous version if necessary. + +In some upgrade scenarios (depending on which version you are upgrading from and to), when different replicas use different ClickHouse versions, you may encounter the following issues: + +1. Replication doesn’t work at all, and delays grow. +2. Errors about 'checksum mismatch' occur, and traffic between replicas increases as they need to resync merge results. +Both problems will be resolved once all replicas are upgraded. + +To learn more, you can download our free upgrade guide here: https://altinity.com/clickhouse-upgrade-overview/ + diff --git a/content/en/upgrade/clickhouse-feature-report.md b/content/en/upgrade/clickhouse-feature-report.md new file mode 100644 index 0000000000..ee34fcf435 --- /dev/null +++ b/content/en/upgrade/clickhouse-feature-report.md @@ -0,0 +1,8 @@ +--- +title: "ClickHouse® Function/Engines/Settings Report" +linkTitle: "ClickHouse® Function/Engines/Settings Report" +description: > + Report on ClickHouse® functions, table functions, table engines, system and MergeTree settings, with availability information. +--- + +Follow this link for a complete report on ClickHouse® features with their availability: https://github.com/anselmodadams/ChMisc/blob/main/report/report.md. It is frequently updated (at least once a month). 
diff --git a/content/en/upgrade/removing-empty-parts.md b/content/en/upgrade/removing-empty-parts.md index 5682474d4c..931704c324 100644 --- a/content/en/upgrade/removing-empty-parts.md +++ b/content/en/upgrade/removing-empty-parts.md @@ -4,7 +4,7 @@ linkTitle: "Removing empty parts" description: > Removing empty parts --- -Removing of empty parts is a new feature introduced in 20.12. +Removing of empty parts is a new feature introduced in ClickHouse® 20.12. Earlier versions leave empty parts (with 0 rows) if TTL removes all rows from a part ([https://github.com/ClickHouse/ClickHouse/issues/5491](https://github.com/ClickHouse/ClickHouse/issues/5491)). If you set up TTL for your data it is likely that there are quite many empty parts in your system. @@ -33,11 +33,11 @@ Removing of empty parts can be disabled by adding `remove_empty_parts=0` to the ```markup $ cat /etc/clickhouse-server/users.d/remove_empty_parts.xml - + 0 - + ``` diff --git a/content/en/upgrade/removing-lost-parts.md b/content/en/upgrade/removing-lost-parts.md index 7cf6dfd4d1..d9a58b2f05 100644 --- a/content/en/upgrade/removing-lost-parts.md +++ b/content/en/upgrade/removing-lost-parts.md @@ -9,7 +9,7 @@ description: > The explanation is here https://github.com/ClickHouse/ClickHouse/pull/26716 -The problem is introduced in 20.1. +The problem is introduced in ClickHouse® 20.1. The problem is fixed in 21.8 and backported to 21.3.16, 21.6.9, 21.7.6. diff --git a/content/en/upgrade/vulnerabilities.md b/content/en/upgrade/vulnerabilities.md index a40a05905e..c95f774966 100644 --- a/content/en/upgrade/vulnerabilities.md +++ b/content/en/upgrade/vulnerabilities.md @@ -6,7 +6,7 @@ description: >- Vulnerabilities --- -## 2022-03-15: 7 vulnerabulities in ClickHouse were published. +## 2022-03-15: 7 vulnerabilities in ClickHouse® were published. 
See the details https://jfrog.com/blog/7-rce-and-dos-vulnerabilities-found-in-clickhouse-dbms/ diff --git a/content/en/upgrade_ebook/_index.md b/content/en/upgrade_ebook/_index.md new file mode 100644 index 0000000000..b1c2720769 --- /dev/null +++ b/content/en/upgrade_ebook/_index.md @@ -0,0 +1,6 @@ +--- +title: "Upgrade eBook" +linkTitle: "ClickHouse® Upgrade eBook" +manualLink: https://hubs.la/Q03ccpmq0 +weight: 1000 +--- diff --git a/content/en/using-this-knowledgebase/_index.md b/content/en/using-this-knowledgebase/_index.md index be5ed418ed..7e91310489 100644 --- a/content/en/using-this-knowledgebase/_index.md +++ b/content/en/using-this-knowledgebase/_index.md @@ -5,7 +5,7 @@ keywords: - clickhouse updates - clickhouse contributions description: > - Add pages, make updates, and contribute to this ClickHouse knowledge base. + Add pages, make updates, and contribute to this ClickHouse® knowledge base. weight: 12 --- The Altinity Knowledge Base is built on GitHub Pages, using Hugo and Docsy. This guide provides a brief description on how to make updates and add to this knowledge base. 
diff --git a/go.mod b/go.mod index 1e17a2288a..6ce82537d5 100644 --- a/go.mod +++ b/go.mod @@ -2,4 +2,7 @@ module github.com/Altinity/altinityknowledgebase go 1.18 -require github.com/google/docsy v0.2.0 // indirect +require ( + github.com/google/docsy v0.10.0 // indirect + github.com/google/docsy/dependencies v0.7.2 // indirect +) diff --git a/go.sum b/go.sum index 69482d853c..ab94afa6b8 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,13 @@ github.com/FortAwesome/Font-Awesome v0.0.0-20210804190922-7d3d774145ac/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo= +github.com/FortAwesome/Font-Awesome v0.0.0-20230327165841-0698449d50f2/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo= +github.com/FortAwesome/Font-Awesome v0.0.0-20240402185447-c0f460dca7f7/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo= github.com/google/docsy v0.2.0 h1:DN6wfyyp2rXsjdV1K3wioxOBTRvG6Gg48wLPDso2lc4= github.com/google/docsy v0.2.0/go.mod h1:shlabwAQakGX6qpXU6Iv/b/SilpHRd7d+xqtZQd3v+8= +github.com/google/docsy v0.10.0 h1:6tMDacPwAyRWNCfvsn/9qGOZDQ8b0aRzjRZvnZPY5dg= +github.com/google/docsy v0.10.0/go.mod h1:c0nIAqmRTOuJ01F85U/wJPQtc3Zj9N58Kea9bOT2AJc= github.com/google/docsy/dependencies v0.2.0/go.mod h1:2zZxHF+2qvkyXhLZtsbnqMotxMukJXLaf8fAZER48oo= +github.com/google/docsy/dependencies v0.7.2 h1:+t5ufoADQAj4XneFphz4A+UU0ICAxmNaRHVWtMYXPSI= +github.com/google/docsy/dependencies v0.7.2/go.mod h1:gihhs5gmgeO+wuoay4FwOzob+jYJVyQbNaQOh788lD4= github.com/twbs/bootstrap v4.6.1+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0= +github.com/twbs/bootstrap v5.2.3+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0= +github.com/twbs/bootstrap v5.3.3+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0= \ No newline at end of file diff --git a/layouts/_default/_markup/render-link.html b/layouts/_default/_markup/render-link.html index a6e54ab599..d1a12ac329 100644 --- a/layouts/_default/_markup/render-link.html +++ 
b/layouts/_default/_markup/render-link.html @@ -1 +1 @@ -
{{ .Text }} \ No newline at end of file +{{ .Text }} diff --git a/layouts/partials/footer.html b/layouts/partials/footer.html index 293e1b80d4..f3e437f0b2 100755 --- a/layouts/partials/footer.html +++ b/layouts/partials/footer.html @@ -1,41 +1,90 @@ {{ $links := .Site.Params.links }}
-
-
- {{ with $links }} - {{ with index . "user"}} - {{ template "footer-links-block" . }} - {{ end }} - {{ end }} + - - - + + + + + + + +
+ {{ define "footer-links-block" }}
    {{ range . }} diff --git a/layouts/partials/hooks/head-end.html b/layouts/partials/hooks/head-end.html index 0e42112f38..1505f1c2fb 100644 --- a/layouts/partials/hooks/head-end.html +++ b/layouts/partials/hooks/head-end.html @@ -1 +1,4 @@ - \ No newline at end of file + + + + \ No newline at end of file diff --git a/layouts/partials/navbar.html b/layouts/partials/navbar.html new file mode 100644 index 0000000000..28137d16e3 --- /dev/null +++ b/layouts/partials/navbar.html @@ -0,0 +1,91 @@ +{{ $cover := and (.HasShortcode "blocks/cover") (not .Site.Params.ui.navbar_translucent_over_cover_disable) }} + diff --git a/layouts/partials/page-meta-links.html b/layouts/partials/page-meta-links.html index f0cb87cd31..3197acf3ff 100644 --- a/layouts/partials/page-meta-links.html +++ b/layouts/partials/page-meta-links.html @@ -1,3 +1,4 @@ +{{ partial "search-input.html" . }} {{ if .File }} {{ $pathFormatted := replace .File.Path "\\" "/" }} {{ $gh_repo := ($.Param "github_repo") }} @@ -15,8 +16,8 @@ {{ $gh_repo_path = printf "%s/%s/content/%s" $gh_branch $gh_subdir $pathFormatted }} {{ end }} - -
    + {{ $editURL := printf "%s/edit/%s" $gh_repo $gh_repo_path }} {{ $createURL := printf "%s/edit/%s" $gh_repo $gh_repo_path }} {{/* $issuesURL := printf "%s/issues/new?title=%s" $gh_repo (htmlEscape $.Title ) */}} diff --git a/layouts/partials/social-links.html b/layouts/partials/social-links.html index b7b291bf01..77a84dc20f 100644 --- a/layouts/partials/social-links.html +++ b/layouts/partials/social-links.html @@ -3,12 +3,12 @@ {{ $linkedinurl := printf "https://www.linkedin.com/shareArticle?mini=true&url=%s&title=%s" (htmlEscape .Permalink) (htmlEscape .Title ) }}
    -

    +
    diff --git a/layouts/partials/toc.html b/layouts/partials/toc.html index c771ea21b3..de5e82d0c8 100644 --- a/layouts/partials/toc.html +++ b/layouts/partials/toc.html @@ -5,4 +5,13 @@ {{ end }} {{ end }} {{ end }} -{{ partial "social-links.html" . }} \ No newline at end of file +{{ partial "social-links.html" . }} + +
    + Altinity®, Altinity.Cloud®, and Altinity Stable® are registered trademarks of Altinity, Inc. ClickHouse® is a registered trademark of ClickHouse, Inc.; Altinity is not affiliated with or associated with ClickHouse, Inc. +
    +
    + Project Antalya
    + Build Real‑Time Data Lakes with ClickHouse® and Apache Iceberg
    + Learn more +
    diff --git a/layouts/redirect/single.html b/layouts/redirect/single.html new file mode 100644 index 0000000000..1537e67292 --- /dev/null +++ b/layouts/redirect/single.html @@ -0,0 +1 @@ +{{- template "_internal/alias.html" (dict "Permalink" .Params.target) -}} diff --git a/resources/_gen/assets/scss/main.scss_3f90599f3717b4a4920df16fdcadce3d.content b/resources/_gen/assets/scss/main.scss_3f90599f3717b4a4920df16fdcadce3d.content new file mode 100644 index 0000000000..5c04df3902 --- /dev/null +++ b/resources/_gen/assets/scss/main.scss_3f90599f3717b4a4920df16fdcadce3d.content @@ -0,0 +1,19896 @@ +/* + +Add styles or override variables from the theme here. + +*/ +@import url("https://fonts.googleapis.com/css?family=Arimo:300,300i,400,400i,700,700i&display=swap"); +:root, +[data-bs-theme="light"] { + --td-pre-bg: var(--bs-tertiary-bg); } + +/*! + * Bootstrap v5.3.3 (https://getbootstrap.com/) + * Copyright 2011-2024 The Bootstrap Authors + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) + */ +:root, +[data-bs-theme="light"] { + --bs-blue: #0d6efd; + --bs-indigo: #6610f2; + --bs-purple: #6f42c1; + --bs-pink: #d63384; + --bs-red: #dc3545; + --bs-orange: #fd7e14; + --bs-yellow: #ffc107; + --bs-green: #198754; + --bs-teal: #20c997; + --bs-cyan: #0dcaf0; + --bs-black: #000; + --bs-white: #fff; + --bs-gray: #6c757d; + --bs-gray-dark: #343a40; + --bs-gray-100: #f8f9fa; + --bs-gray-200: #e9ecef; + --bs-gray-300: #dee2e6; + --bs-gray-400: #ced4da; + --bs-gray-500: #adb5bd; + --bs-gray-600: #6c757d; + --bs-gray-700: #495057; + --bs-gray-800: #343a40; + --bs-gray-900: #212529; + --bs-primary: #189DD0; + --bs-secondary: #ffcc00; + --bs-success: #5ca012; + --bs-info: #667373; + --bs-warning: #ed6a5a; + --bs-danger: #fe4954; + --bs-light: #d3f3ee; + --bs-dark: #403f4c; + --bs-primary-rgb: 24, 157, 208; + --bs-secondary-rgb: 255, 204, 0; + --bs-success-rgb: 92, 160, 18; + --bs-info-rgb: 102, 115, 115; + --bs-warning-rgb: 237, 106, 90; + --bs-danger-rgb: 
254, 73, 84; + --bs-light-rgb: 211, 243, 238; + --bs-dark-rgb: 64, 63, 76; + --bs-primary-text-emphasis: #0a3f53; + --bs-secondary-text-emphasis: #665200; + --bs-success-text-emphasis: #254007; + --bs-info-text-emphasis: #292e2e; + --bs-warning-text-emphasis: #5f2a24; + --bs-danger-text-emphasis: #661d22; + --bs-light-text-emphasis: #495057; + --bs-dark-text-emphasis: #495057; + --bs-primary-bg-subtle: #d1ebf6; + --bs-secondary-bg-subtle: #fff5cc; + --bs-success-bg-subtle: #deecd0; + --bs-info-bg-subtle: #e0e3e3; + --bs-warning-bg-subtle: #fbe1de; + --bs-danger-bg-subtle: #ffdbdd; + --bs-light-bg-subtle: #fcfcfd; + --bs-dark-bg-subtle: #ced4da; + --bs-primary-border-subtle: #a3d8ec; + --bs-secondary-border-subtle: #ffeb99; + --bs-success-border-subtle: #bed9a0; + --bs-info-border-subtle: #c2c7c7; + --bs-warning-border-subtle: #f8c3bd; + --bs-danger-border-subtle: #ffb6bb; + --bs-light-border-subtle: #e9ecef; + --bs-dark-border-subtle: #adb5bd; + --bs-white-rgb: 255, 255, 255; + --bs-black-rgb: 0, 0, 0; + --bs-font-sans-serif: "Arimo", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + --bs-font-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; + --bs-gradient: linear-gradient(180deg, rgba(255, 255, 255, 0.15), rgba(255, 255, 255, 0)); + --bs-body-font-family: "Arimo", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + --bs-body-font-size: 1rem; + --bs-body-font-weight: 400; + --bs-body-line-height: 1.5; + --bs-body-color: #212529; + --bs-body-color-rgb: 33, 37, 41; + --bs-body-bg: #fff; + --bs-body-bg-rgb: 255, 255, 255; + --bs-emphasis-color: #000; + --bs-emphasis-color-rgb: 0, 0, 0; + --bs-secondary-color: rgba(33, 37, 41, 0.75); + --bs-secondary-color-rgb: 33, 37, 41; + --bs-secondary-bg: #e9ecef; + 
--bs-secondary-bg-rgb: 233, 236, 239; + --bs-tertiary-color: rgba(33, 37, 41, 0.5); + --bs-tertiary-color-rgb: 33, 37, 41; + --bs-tertiary-bg: #f8f9fa; + --bs-tertiary-bg-rgb: 248, 249, 250; + --bs-heading-color: inherit; + --bs-link-color: #0d6efd; + --bs-link-color-rgb: 13, 110, 253; + --bs-link-decoration: underline; + --bs-link-hover-color: #094db1; + --bs-link-hover-color-rgb: 9, 77, 177; + --bs-code-color: #997a00; + --bs-highlight-color: #212529; + --bs-highlight-bg: #fff3cd; + --bs-border-width: 1px; + --bs-border-style: solid; + --bs-border-color: #dee2e6; + --bs-border-color-translucent: rgba(0, 0, 0, 0.175); + --bs-border-radius: 0.375rem; + --bs-border-radius-sm: 0.25rem; + --bs-border-radius-lg: 0.5rem; + --bs-border-radius-xl: 1rem; + --bs-border-radius-xxl: 2rem; + --bs-border-radius-2xl: var(--bs-border-radius-xxl); + --bs-border-radius-pill: 50rem; + --bs-box-shadow: 0 0.5rem 1rem rgba(0, 0, 0, 0.15); + --bs-box-shadow-sm: 0 0.125rem 0.25rem rgba(0, 0, 0, 0.075); + --bs-box-shadow-lg: 0 1rem 3rem rgba(0, 0, 0, 0.175); + --bs-box-shadow-inset: inset 0 1px 2px rgba(0, 0, 0, 0.075); + --bs-focus-ring-width: 0.25rem; + --bs-focus-ring-opacity: 0.25; + --bs-focus-ring-color: rgba(24, 157, 208, 0.25); + --bs-form-valid-color: #5ca012; + --bs-form-valid-border-color: #5ca012; + --bs-form-invalid-color: #fe4954; + --bs-form-invalid-border-color: #fe4954; } + +[data-bs-theme="dark"] { + color-scheme: dark; + --bs-body-color: #dee2e6; + --bs-body-color-rgb: 222, 226, 230; + --bs-body-bg: #212529; + --bs-body-bg-rgb: 33, 37, 41; + --bs-emphasis-color: #fff; + --bs-emphasis-color-rgb: 255, 255, 255; + --bs-secondary-color: rgba(222, 226, 230, 0.75); + --bs-secondary-color-rgb: 222, 226, 230; + --bs-secondary-bg: #343a40; + --bs-secondary-bg-rgb: 52, 58, 64; + --bs-tertiary-color: rgba(222, 226, 230, 0.5); + --bs-tertiary-color-rgb: 222, 226, 230; + --bs-tertiary-bg: #2b3035; + --bs-tertiary-bg-rgb: 43, 48, 53; + --bs-primary-text-emphasis: #74c4e3; + 
--bs-secondary-text-emphasis: #ffe066; + --bs-success-text-emphasis: #9dc671; + --bs-info-text-emphasis: #a3abab; + --bs-warning-text-emphasis: #f4a69c; + --bs-danger-text-emphasis: #fe9298; + --bs-light-text-emphasis: #f8f9fa; + --bs-dark-text-emphasis: #dee2e6; + --bs-primary-bg-subtle: #051f2a; + --bs-secondary-bg-subtle: #332900; + --bs-success-bg-subtle: #122004; + --bs-info-bg-subtle: #141717; + --bs-warning-bg-subtle: #2f1512; + --bs-danger-bg-subtle: #330f11; + --bs-light-bg-subtle: #343a40; + --bs-dark-bg-subtle: #1a1d20; + --bs-primary-border-subtle: #0e5e7d; + --bs-secondary-border-subtle: #997a00; + --bs-success-border-subtle: #37600b; + --bs-info-border-subtle: #3d4545; + --bs-warning-border-subtle: #8e4036; + --bs-danger-border-subtle: #982c32; + --bs-light-border-subtle: #495057; + --bs-dark-border-subtle: #343a40; + --bs-heading-color: inherit; + --bs-link-color: #74c4e3; + --bs-link-hover-color: #9ed6eb; + --bs-link-color-rgb: 116, 196, 227; + --bs-link-hover-color-rgb: 158, 214, 235; + --bs-code-color: #c2af66; + --bs-highlight-color: #dee2e6; + --bs-highlight-bg: #664d03; + --bs-border-color: #495057; + --bs-border-color-translucent: rgba(255, 255, 255, 0.15); + --bs-form-valid-color: #75b798; + --bs-form-valid-border-color: #75b798; + --bs-form-invalid-color: #ea868f; + --bs-form-invalid-border-color: #ea868f; } + +*, +*::before, +*::after { + box-sizing: border-box; } + +@media (prefers-reduced-motion: no-preference) { + :root { + scroll-behavior: smooth; } } + +body { + margin: 0; + font-family: var(--bs-body-font-family); + font-size: var(--bs-body-font-size); + font-weight: var(--bs-body-font-weight); + line-height: var(--bs-body-line-height); + color: var(--bs-body-color); + text-align: var(--bs-body-text-align); + background-color: var(--bs-body-bg); + -webkit-text-size-adjust: 100%; + -webkit-tap-highlight-color: rgba(0, 0, 0, 0); } + +hr { + margin: 1rem 0; + color: inherit; + border: 0; + border-top: var(--bs-border-width) solid; + 
opacity: 0.25; } + +h6, .h6, h5, .h5, h4, .h4, h3, .h3, .td-footer__links-item, h2, .h2, h1, .h1 { + margin-top: 0; + margin-bottom: 0.5rem; + font-weight: 500; + line-height: 1.2; + color: var(--bs-heading-color); } + +h1, .h1 { + font-size: calc(1.375rem + 1.5vw); } + @media (min-width: 1200px) { + h1, .h1 { + font-size: 2.5rem; } } +h2, .h2 { + font-size: calc(1.325rem + 0.9vw); } + @media (min-width: 1200px) { + h2, .h2 { + font-size: 2rem; } } +h3, .h3, .td-footer__links-item { + font-size: calc(1.275rem + 0.3vw); } + @media (min-width: 1200px) { + h3, .h3, .td-footer__links-item { + font-size: 1.5rem; } } +h4, .h4 { + font-size: calc(1.26rem + 0.12vw); } + @media (min-width: 1200px) { + h4, .h4 { + font-size: 1.35rem; } } +h5, .h5 { + font-size: 1.15rem; } + +h6, .h6 { + font-size: 1rem; } + +p { + margin-top: 0; + margin-bottom: 1rem; } + +abbr[title] { + text-decoration: underline dotted; + cursor: help; + text-decoration-skip-ink: none; } + +address { + margin-bottom: 1rem; + font-style: normal; + line-height: inherit; } + +ol, +ul { + padding-left: 2rem; } + +ol, +ul, +dl { + margin-top: 0; + margin-bottom: 1rem; } + +ol ol, +ul ul, +ol ul, +ul ol { + margin-bottom: 0; } + +dt { + font-weight: 700; } + +dd { + margin-bottom: .5rem; + margin-left: 0; } + +blockquote { + margin: 0 0 1rem; } + +b, +strong { + font-weight: bolder; } + +small, .small, .td-footer__center, .td-cover-block > .byline { + font-size: 0.875em; } + +mark, .mark { + padding: 0.1875em; + color: var(--bs-highlight-color); + background-color: var(--bs-highlight-bg); } + +sub, +sup { + position: relative; + font-size: 0.75em; + line-height: 0; + vertical-align: baseline; } + +sub { + bottom: -.25em; } + +sup { + top: -.5em; } + +a { + color: rgba(var(--bs-link-color-rgb), var(--bs-link-opacity, 1)); + text-decoration: underline; } + a:hover { + --bs-link-color-rgb: var(--bs-link-hover-color-rgb); } + +a:not([href]):not([class]), a:not([href]):not([class]):hover { + color: inherit; + 
text-decoration: none; } + +pre, +code, +kbd, +samp { + font-family: var(--bs-font-monospace); + font-size: 1em; } + +pre { + display: block; + margin-top: 0; + margin-bottom: 1rem; + overflow: auto; + font-size: 0.875em; } + pre code { + font-size: inherit; + color: inherit; + word-break: normal; } + +code { + font-size: 0.875em; + color: var(--bs-code-color); + word-wrap: break-word; } + a > code { + color: inherit; } + +kbd { + padding: 0.1875rem 0.375rem; + font-size: 0.875em; + color: var(--bs-body-bg); + background-color: var(--bs-body-color); + border-radius: 0.25rem; } + kbd kbd { + padding: 0; + font-size: 1em; } + +figure { + margin: 0 0 1rem; } + +img, +svg { + vertical-align: middle; } + +table { + caption-side: bottom; + border-collapse: collapse; } + +caption { + padding-top: 0.5rem; + padding-bottom: 0.5rem; + color: var(--bs-secondary-color); + text-align: left; } + +th { + text-align: inherit; + text-align: -webkit-match-parent; } + +thead, +tbody, +tfoot, +tr, +td, +th { + border-color: inherit; + border-style: solid; + border-width: 0; } + +label { + display: inline-block; } + +button { + border-radius: 0; } + +button:focus:not(:focus-visible) { + outline: 0; } + +input, +button, +select, +optgroup, +textarea { + margin: 0; + font-family: inherit; + font-size: inherit; + line-height: inherit; } + +button, +select { + text-transform: none; } + +[role="button"] { + cursor: pointer; } + +select { + word-wrap: normal; } + select:disabled { + opacity: 1; } + +[list]:not([type="date"]):not([type="datetime-local"]):not([type="month"]):not([type="week"]):not([type="time"])::-webkit-calendar-picker-indicator { + display: none !important; } + +button, +[type="button"], +[type="reset"], +[type="submit"] { + -webkit-appearance: button; } + button:not(:disabled), + [type="button"]:not(:disabled), + [type="reset"]:not(:disabled), + [type="submit"]:not(:disabled) { + cursor: pointer; } + +::-moz-focus-inner { + padding: 0; + border-style: none; } + +textarea { 
+ resize: vertical; } + +fieldset { + min-width: 0; + padding: 0; + margin: 0; + border: 0; } + +legend { + float: left; + width: 100%; + padding: 0; + margin-bottom: 0.5rem; + font-size: calc(1.275rem + 0.3vw); + line-height: inherit; } + @media (min-width: 1200px) { + legend { + font-size: 1.5rem; } } + legend + * { + clear: left; } + +::-webkit-datetime-edit-fields-wrapper, +::-webkit-datetime-edit-text, +::-webkit-datetime-edit-minute, +::-webkit-datetime-edit-hour-field, +::-webkit-datetime-edit-day-field, +::-webkit-datetime-edit-month-field, +::-webkit-datetime-edit-year-field { + padding: 0; } + +::-webkit-inner-spin-button { + height: auto; } + +[type="search"] { + -webkit-appearance: textfield; + outline-offset: -2px; } + +/* rtl:raw: +[type="tel"], +[type="url"], +[type="email"], +[type="number"] { + direction: ltr; +} +*/ +::-webkit-search-decoration { + -webkit-appearance: none; } + +::-webkit-color-swatch-wrapper { + padding: 0; } + +::file-selector-button { + font: inherit; + -webkit-appearance: button; } + +output { + display: inline-block; } + +iframe { + border: 0; } + +summary { + display: list-item; + cursor: pointer; } + +progress { + vertical-align: baseline; } + +[hidden] { + display: none !important; } + +.lead { + font-size: 1.25rem; + font-weight: 300; } + +.display-1 { + font-size: calc(1.625rem + 4.5vw); + font-weight: 300; + line-height: 1.2; } + @media (min-width: 1200px) { + .display-1 { + font-size: 5rem; } } +.display-2 { + font-size: calc(1.575rem + 3.9vw); + font-weight: 300; + line-height: 1.2; } + @media (min-width: 1200px) { + .display-2 { + font-size: 4.5rem; } } +.display-3 { + font-size: calc(1.525rem + 3.3vw); + font-weight: 300; + line-height: 1.2; } + @media (min-width: 1200px) { + .display-3 { + font-size: 4rem; } } +.display-4 { + font-size: calc(1.475rem + 2.7vw); + font-weight: 300; + line-height: 1.2; } + @media (min-width: 1200px) { + .display-4 { + font-size: 3.5rem; } } +.display-5 { + font-size: calc(1.425rem + 
2.1vw); + font-weight: 300; + line-height: 1.2; } + @media (min-width: 1200px) { + .display-5 { + font-size: 3rem; } } +.display-6 { + font-size: calc(1.375rem + 1.5vw); + font-weight: 300; + line-height: 1.2; } + @media (min-width: 1200px) { + .display-6 { + font-size: 2.5rem; } } +.list-unstyled, .td-blog-posts-list { + padding-left: 0; + list-style: none; } + +.list-inline, .td-footer__links-list { + padding-left: 0; + list-style: none; } + +.list-inline-item, .td-footer__links-item { + display: inline-block; } + .list-inline-item:not(:last-child), .td-footer__links-item:not(:last-child) { + margin-right: 1rem; } + +.initialism { + font-size: 0.875em; + text-transform: uppercase; } + +.blockquote { + margin-bottom: 1rem; + font-size: 1.25rem; } + .blockquote > :last-child { + margin-bottom: 0; } + +.blockquote-footer { + margin-top: -1rem; + margin-bottom: 1rem; + font-size: 0.875em; + color: #6c757d; } + .blockquote-footer::before { + content: "\2014\00A0"; } + +.img-fluid { + max-width: 100%; + height: auto; } + +.img-thumbnail { + padding: 0.25rem; + background-color: var(--bs-body-bg); + border: var(--bs-border-width) solid var(--bs-border-color); + border-radius: var(--bs-border-radius); + box-shadow: var(--bs-box-shadow-sm); + max-width: 100%; + height: auto; } + +.figure { + display: inline-block; } + +.figure-img { + margin-bottom: 0.5rem; + line-height: 1; } + +.figure-caption { + font-size: 0.875em; + color: var(--bs-secondary-color); } + +.container, +.container-fluid, +.container-xxl, +.container-xl, +.container-lg, +.container-md, +.container-sm { + --bs-gutter-x: 1.5rem; + --bs-gutter-y: 0; + width: 100%; + padding-right: calc(var(--bs-gutter-x) * .5); + padding-left: calc(var(--bs-gutter-x) * .5); + margin-right: auto; + margin-left: auto; } + +@media (min-width: 576px) { + .container-sm, .container { + max-width: 540px; } } + +@media (min-width: 768px) { + .container-md, .container-sm, .container { + max-width: 720px; } } + +@media (min-width: 
992px) { + .container-lg, .container-md, .container-sm, .container { + max-width: 960px; } } + +@media (min-width: 1200px) { + .container-xl, .container-lg, .container-md, .container-sm, .container { + max-width: 1140px; } } + +@media (min-width: 1400px) { + .container-xxl, .container-xl, .container-lg, .container-md, .container-sm, .container { + max-width: 1320px; } } + +:root { + --bs-breakpoint-xs: 0; + --bs-breakpoint-sm: 576px; + --bs-breakpoint-md: 768px; + --bs-breakpoint-lg: 992px; + --bs-breakpoint-xl: 1200px; + --bs-breakpoint-xxl: 1400px; } + +.row { + --bs-gutter-x: 1.5rem; + --bs-gutter-y: 0; + display: flex; + flex-wrap: wrap; + margin-top: calc(-1 * var(--bs-gutter-y)); + margin-right: calc(-.5 * var(--bs-gutter-x)); + margin-left: calc(-.5 * var(--bs-gutter-x)); } + .row > * { + flex-shrink: 0; + width: 100%; + max-width: 100%; + padding-right: calc(var(--bs-gutter-x) * .5); + padding-left: calc(var(--bs-gutter-x) * .5); + margin-top: var(--bs-gutter-y); } + +.col { + flex: 1 0 0%; } + +.row-cols-auto > * { + flex: 0 0 auto; + width: auto; } + +.row-cols-1 > * { + flex: 0 0 auto; + width: 100%; } + +.row-cols-2 > * { + flex: 0 0 auto; + width: 50%; } + +.row-cols-3 > * { + flex: 0 0 auto; + width: 33.33333333%; } + +.row-cols-4 > * { + flex: 0 0 auto; + width: 25%; } + +.row-cols-5 > * { + flex: 0 0 auto; + width: 20%; } + +.row-cols-6 > * { + flex: 0 0 auto; + width: 16.66666667%; } + +.col-auto { + flex: 0 0 auto; + width: auto; } + +.col-1 { + flex: 0 0 auto; + width: 8.33333333%; } + +.col-2 { + flex: 0 0 auto; + width: 16.66666667%; } + +.col-3 { + flex: 0 0 auto; + width: 25%; } + +.col-4 { + flex: 0 0 auto; + width: 33.33333333%; } + +.col-5 { + flex: 0 0 auto; + width: 41.66666667%; } + +.col-6 { + flex: 0 0 auto; + width: 50%; } + +.col-7 { + flex: 0 0 auto; + width: 58.33333333%; } + +.col-8 { + flex: 0 0 auto; + width: 66.66666667%; } + +.col-9 { + flex: 0 0 auto; + width: 75%; } + +.col-10 { + flex: 0 0 auto; + width: 83.33333333%; } + 
+.col-11 { + flex: 0 0 auto; + width: 91.66666667%; } + +.col-12 { + flex: 0 0 auto; + width: 100%; } + +.offset-1 { + margin-left: 8.33333333%; } + +.offset-2 { + margin-left: 16.66666667%; } + +.offset-3 { + margin-left: 25%; } + +.offset-4 { + margin-left: 33.33333333%; } + +.offset-5 { + margin-left: 41.66666667%; } + +.offset-6 { + margin-left: 50%; } + +.offset-7 { + margin-left: 58.33333333%; } + +.offset-8 { + margin-left: 66.66666667%; } + +.offset-9 { + margin-left: 75%; } + +.offset-10 { + margin-left: 83.33333333%; } + +.offset-11 { + margin-left: 91.66666667%; } + +.g-0, +.gx-0 { + --bs-gutter-x: 0; } + +.g-0, +.gy-0 { + --bs-gutter-y: 0; } + +.g-1, +.gx-1 { + --bs-gutter-x: 0.25rem; } + +.g-1, +.gy-1 { + --bs-gutter-y: 0.25rem; } + +.g-2, +.gx-2 { + --bs-gutter-x: 0.5rem; } + +.g-2, +.gy-2 { + --bs-gutter-y: 0.5rem; } + +.g-3, +.gx-3 { + --bs-gutter-x: 1rem; } + +.g-3, +.gy-3 { + --bs-gutter-y: 1rem; } + +.g-4, +.gx-4 { + --bs-gutter-x: 1.5rem; } + +.g-4, +.gy-4 { + --bs-gutter-y: 1.5rem; } + +.g-5, +.gx-5 { + --bs-gutter-x: 3rem; } + +.g-5, +.gy-5 { + --bs-gutter-y: 3rem; } + +@media (min-width: 576px) { + .col-sm { + flex: 1 0 0%; } + .row-cols-sm-auto > * { + flex: 0 0 auto; + width: auto; } + .row-cols-sm-1 > * { + flex: 0 0 auto; + width: 100%; } + .row-cols-sm-2 > * { + flex: 0 0 auto; + width: 50%; } + .row-cols-sm-3 > * { + flex: 0 0 auto; + width: 33.33333333%; } + .row-cols-sm-4 > * { + flex: 0 0 auto; + width: 25%; } + .row-cols-sm-5 > * { + flex: 0 0 auto; + width: 20%; } + .row-cols-sm-6 > * { + flex: 0 0 auto; + width: 16.66666667%; } + .col-sm-auto { + flex: 0 0 auto; + width: auto; } + .col-sm-1 { + flex: 0 0 auto; + width: 8.33333333%; } + .col-sm-2 { + flex: 0 0 auto; + width: 16.66666667%; } + .col-sm-3 { + flex: 0 0 auto; + width: 25%; } + .col-sm-4 { + flex: 0 0 auto; + width: 33.33333333%; } + .col-sm-5 { + flex: 0 0 auto; + width: 41.66666667%; } + .col-sm-6 { + flex: 0 0 auto; + width: 50%; } + .col-sm-7 { + flex: 0 0 auto; + 
width: 58.33333333%; } + .col-sm-8 { + flex: 0 0 auto; + width: 66.66666667%; } + .col-sm-9 { + flex: 0 0 auto; + width: 75%; } + .col-sm-10 { + flex: 0 0 auto; + width: 83.33333333%; } + .col-sm-11 { + flex: 0 0 auto; + width: 91.66666667%; } + .col-sm-12 { + flex: 0 0 auto; + width: 100%; } + .offset-sm-0 { + margin-left: 0; } + .offset-sm-1 { + margin-left: 8.33333333%; } + .offset-sm-2 { + margin-left: 16.66666667%; } + .offset-sm-3 { + margin-left: 25%; } + .offset-sm-4 { + margin-left: 33.33333333%; } + .offset-sm-5 { + margin-left: 41.66666667%; } + .offset-sm-6 { + margin-left: 50%; } + .offset-sm-7 { + margin-left: 58.33333333%; } + .offset-sm-8 { + margin-left: 66.66666667%; } + .offset-sm-9 { + margin-left: 75%; } + .offset-sm-10 { + margin-left: 83.33333333%; } + .offset-sm-11 { + margin-left: 91.66666667%; } + .g-sm-0, + .gx-sm-0 { + --bs-gutter-x: 0; } + .g-sm-0, + .gy-sm-0 { + --bs-gutter-y: 0; } + .g-sm-1, + .gx-sm-1 { + --bs-gutter-x: 0.25rem; } + .g-sm-1, + .gy-sm-1 { + --bs-gutter-y: 0.25rem; } + .g-sm-2, + .gx-sm-2 { + --bs-gutter-x: 0.5rem; } + .g-sm-2, + .gy-sm-2 { + --bs-gutter-y: 0.5rem; } + .g-sm-3, + .gx-sm-3 { + --bs-gutter-x: 1rem; } + .g-sm-3, + .gy-sm-3 { + --bs-gutter-y: 1rem; } + .g-sm-4, + .gx-sm-4 { + --bs-gutter-x: 1.5rem; } + .g-sm-4, + .gy-sm-4 { + --bs-gutter-y: 1.5rem; } + .g-sm-5, + .gx-sm-5 { + --bs-gutter-x: 3rem; } + .g-sm-5, + .gy-sm-5 { + --bs-gutter-y: 3rem; } } + +@media (min-width: 768px) { + .col-md { + flex: 1 0 0%; } + .row-cols-md-auto > * { + flex: 0 0 auto; + width: auto; } + .row-cols-md-1 > * { + flex: 0 0 auto; + width: 100%; } + .row-cols-md-2 > * { + flex: 0 0 auto; + width: 50%; } + .row-cols-md-3 > * { + flex: 0 0 auto; + width: 33.33333333%; } + .row-cols-md-4 > * { + flex: 0 0 auto; + width: 25%; } + .row-cols-md-5 > * { + flex: 0 0 auto; + width: 20%; } + .row-cols-md-6 > * { + flex: 0 0 auto; + width: 16.66666667%; } + .col-md-auto { + flex: 0 0 auto; + width: auto; } + .col-md-1 { + flex: 0 0 auto; + 
width: 8.33333333%; } + .col-md-2 { + flex: 0 0 auto; + width: 16.66666667%; } + .col-md-3 { + flex: 0 0 auto; + width: 25%; } + .col-md-4 { + flex: 0 0 auto; + width: 33.33333333%; } + .col-md-5 { + flex: 0 0 auto; + width: 41.66666667%; } + .col-md-6 { + flex: 0 0 auto; + width: 50%; } + .col-md-7 { + flex: 0 0 auto; + width: 58.33333333%; } + .col-md-8 { + flex: 0 0 auto; + width: 66.66666667%; } + .col-md-9 { + flex: 0 0 auto; + width: 75%; } + .col-md-10 { + flex: 0 0 auto; + width: 83.33333333%; } + .col-md-11 { + flex: 0 0 auto; + width: 91.66666667%; } + .col-md-12 { + flex: 0 0 auto; + width: 100%; } + .offset-md-0 { + margin-left: 0; } + .offset-md-1 { + margin-left: 8.33333333%; } + .offset-md-2 { + margin-left: 16.66666667%; } + .offset-md-3 { + margin-left: 25%; } + .offset-md-4 { + margin-left: 33.33333333%; } + .offset-md-5 { + margin-left: 41.66666667%; } + .offset-md-6 { + margin-left: 50%; } + .offset-md-7 { + margin-left: 58.33333333%; } + .offset-md-8 { + margin-left: 66.66666667%; } + .offset-md-9 { + margin-left: 75%; } + .offset-md-10 { + margin-left: 83.33333333%; } + .offset-md-11 { + margin-left: 91.66666667%; } + .g-md-0, + .gx-md-0 { + --bs-gutter-x: 0; } + .g-md-0, + .gy-md-0 { + --bs-gutter-y: 0; } + .g-md-1, + .gx-md-1 { + --bs-gutter-x: 0.25rem; } + .g-md-1, + .gy-md-1 { + --bs-gutter-y: 0.25rem; } + .g-md-2, + .gx-md-2 { + --bs-gutter-x: 0.5rem; } + .g-md-2, + .gy-md-2 { + --bs-gutter-y: 0.5rem; } + .g-md-3, + .gx-md-3 { + --bs-gutter-x: 1rem; } + .g-md-3, + .gy-md-3 { + --bs-gutter-y: 1rem; } + .g-md-4, + .gx-md-4 { + --bs-gutter-x: 1.5rem; } + .g-md-4, + .gy-md-4 { + --bs-gutter-y: 1.5rem; } + .g-md-5, + .gx-md-5 { + --bs-gutter-x: 3rem; } + .g-md-5, + .gy-md-5 { + --bs-gutter-y: 3rem; } } + +@media (min-width: 992px) { + .col-lg { + flex: 1 0 0%; } + .row-cols-lg-auto > * { + flex: 0 0 auto; + width: auto; } + .row-cols-lg-1 > * { + flex: 0 0 auto; + width: 100%; } + .row-cols-lg-2 > * { + flex: 0 0 auto; + width: 50%; } + 
.row-cols-lg-3 > * { + flex: 0 0 auto; + width: 33.33333333%; } + .row-cols-lg-4 > * { + flex: 0 0 auto; + width: 25%; } + .row-cols-lg-5 > * { + flex: 0 0 auto; + width: 20%; } + .row-cols-lg-6 > * { + flex: 0 0 auto; + width: 16.66666667%; } + .col-lg-auto { + flex: 0 0 auto; + width: auto; } + .col-lg-1 { + flex: 0 0 auto; + width: 8.33333333%; } + .col-lg-2 { + flex: 0 0 auto; + width: 16.66666667%; } + .col-lg-3 { + flex: 0 0 auto; + width: 25%; } + .col-lg-4 { + flex: 0 0 auto; + width: 33.33333333%; } + .col-lg-5 { + flex: 0 0 auto; + width: 41.66666667%; } + .col-lg-6 { + flex: 0 0 auto; + width: 50%; } + .col-lg-7 { + flex: 0 0 auto; + width: 58.33333333%; } + .col-lg-8 { + flex: 0 0 auto; + width: 66.66666667%; } + .col-lg-9 { + flex: 0 0 auto; + width: 75%; } + .col-lg-10 { + flex: 0 0 auto; + width: 83.33333333%; } + .col-lg-11 { + flex: 0 0 auto; + width: 91.66666667%; } + .col-lg-12 { + flex: 0 0 auto; + width: 100%; } + .offset-lg-0 { + margin-left: 0; } + .offset-lg-1 { + margin-left: 8.33333333%; } + .offset-lg-2 { + margin-left: 16.66666667%; } + .offset-lg-3 { + margin-left: 25%; } + .offset-lg-4 { + margin-left: 33.33333333%; } + .offset-lg-5 { + margin-left: 41.66666667%; } + .offset-lg-6 { + margin-left: 50%; } + .offset-lg-7 { + margin-left: 58.33333333%; } + .offset-lg-8 { + margin-left: 66.66666667%; } + .offset-lg-9 { + margin-left: 75%; } + .offset-lg-10 { + margin-left: 83.33333333%; } + .offset-lg-11 { + margin-left: 91.66666667%; } + .g-lg-0, + .gx-lg-0 { + --bs-gutter-x: 0; } + .g-lg-0, + .gy-lg-0 { + --bs-gutter-y: 0; } + .g-lg-1, + .gx-lg-1 { + --bs-gutter-x: 0.25rem; } + .g-lg-1, + .gy-lg-1 { + --bs-gutter-y: 0.25rem; } + .g-lg-2, + .gx-lg-2 { + --bs-gutter-x: 0.5rem; } + .g-lg-2, + .gy-lg-2 { + --bs-gutter-y: 0.5rem; } + .g-lg-3, + .gx-lg-3 { + --bs-gutter-x: 1rem; } + .g-lg-3, + .gy-lg-3 { + --bs-gutter-y: 1rem; } + .g-lg-4, + .gx-lg-4 { + --bs-gutter-x: 1.5rem; } + .g-lg-4, + .gy-lg-4 { + --bs-gutter-y: 1.5rem; } + .g-lg-5, + 
.gx-lg-5 { + --bs-gutter-x: 3rem; } + .g-lg-5, + .gy-lg-5 { + --bs-gutter-y: 3rem; } } + +@media (min-width: 1200px) { + .col-xl { + flex: 1 0 0%; } + .row-cols-xl-auto > * { + flex: 0 0 auto; + width: auto; } + .row-cols-xl-1 > * { + flex: 0 0 auto; + width: 100%; } + .row-cols-xl-2 > * { + flex: 0 0 auto; + width: 50%; } + .row-cols-xl-3 > * { + flex: 0 0 auto; + width: 33.33333333%; } + .row-cols-xl-4 > * { + flex: 0 0 auto; + width: 25%; } + .row-cols-xl-5 > * { + flex: 0 0 auto; + width: 20%; } + .row-cols-xl-6 > * { + flex: 0 0 auto; + width: 16.66666667%; } + .col-xl-auto { + flex: 0 0 auto; + width: auto; } + .col-xl-1 { + flex: 0 0 auto; + width: 8.33333333%; } + .col-xl-2 { + flex: 0 0 auto; + width: 16.66666667%; } + .col-xl-3 { + flex: 0 0 auto; + width: 25%; } + .col-xl-4 { + flex: 0 0 auto; + width: 33.33333333%; } + .col-xl-5 { + flex: 0 0 auto; + width: 41.66666667%; } + .col-xl-6 { + flex: 0 0 auto; + width: 50%; } + .col-xl-7 { + flex: 0 0 auto; + width: 58.33333333%; } + .col-xl-8 { + flex: 0 0 auto; + width: 66.66666667%; } + .col-xl-9 { + flex: 0 0 auto; + width: 75%; } + .col-xl-10 { + flex: 0 0 auto; + width: 83.33333333%; } + .col-xl-11 { + flex: 0 0 auto; + width: 91.66666667%; } + .col-xl-12 { + flex: 0 0 auto; + width: 100%; } + .offset-xl-0 { + margin-left: 0; } + .offset-xl-1 { + margin-left: 8.33333333%; } + .offset-xl-2 { + margin-left: 16.66666667%; } + .offset-xl-3 { + margin-left: 25%; } + .offset-xl-4 { + margin-left: 33.33333333%; } + .offset-xl-5 { + margin-left: 41.66666667%; } + .offset-xl-6 { + margin-left: 50%; } + .offset-xl-7 { + margin-left: 58.33333333%; } + .offset-xl-8 { + margin-left: 66.66666667%; } + .offset-xl-9 { + margin-left: 75%; } + .offset-xl-10 { + margin-left: 83.33333333%; } + .offset-xl-11 { + margin-left: 91.66666667%; } + .g-xl-0, + .gx-xl-0 { + --bs-gutter-x: 0; } + .g-xl-0, + .gy-xl-0 { + --bs-gutter-y: 0; } + .g-xl-1, + .gx-xl-1 { + --bs-gutter-x: 0.25rem; } + .g-xl-1, + .gy-xl-1 { + --bs-gutter-y: 
0.25rem; } + .g-xl-2, + .gx-xl-2 { + --bs-gutter-x: 0.5rem; } + .g-xl-2, + .gy-xl-2 { + --bs-gutter-y: 0.5rem; } + .g-xl-3, + .gx-xl-3 { + --bs-gutter-x: 1rem; } + .g-xl-3, + .gy-xl-3 { + --bs-gutter-y: 1rem; } + .g-xl-4, + .gx-xl-4 { + --bs-gutter-x: 1.5rem; } + .g-xl-4, + .gy-xl-4 { + --bs-gutter-y: 1.5rem; } + .g-xl-5, + .gx-xl-5 { + --bs-gutter-x: 3rem; } + .g-xl-5, + .gy-xl-5 { + --bs-gutter-y: 3rem; } } + +@media (min-width: 1400px) { + .col-xxl { + flex: 1 0 0%; } + .row-cols-xxl-auto > * { + flex: 0 0 auto; + width: auto; } + .row-cols-xxl-1 > * { + flex: 0 0 auto; + width: 100%; } + .row-cols-xxl-2 > * { + flex: 0 0 auto; + width: 50%; } + .row-cols-xxl-3 > * { + flex: 0 0 auto; + width: 33.33333333%; } + .row-cols-xxl-4 > * { + flex: 0 0 auto; + width: 25%; } + .row-cols-xxl-5 > * { + flex: 0 0 auto; + width: 20%; } + .row-cols-xxl-6 > * { + flex: 0 0 auto; + width: 16.66666667%; } + .col-xxl-auto { + flex: 0 0 auto; + width: auto; } + .col-xxl-1 { + flex: 0 0 auto; + width: 8.33333333%; } + .col-xxl-2 { + flex: 0 0 auto; + width: 16.66666667%; } + .col-xxl-3 { + flex: 0 0 auto; + width: 25%; } + .col-xxl-4 { + flex: 0 0 auto; + width: 33.33333333%; } + .col-xxl-5 { + flex: 0 0 auto; + width: 41.66666667%; } + .col-xxl-6 { + flex: 0 0 auto; + width: 50%; } + .col-xxl-7 { + flex: 0 0 auto; + width: 58.33333333%; } + .col-xxl-8 { + flex: 0 0 auto; + width: 66.66666667%; } + .col-xxl-9 { + flex: 0 0 auto; + width: 75%; } + .col-xxl-10 { + flex: 0 0 auto; + width: 83.33333333%; } + .col-xxl-11 { + flex: 0 0 auto; + width: 91.66666667%; } + .col-xxl-12 { + flex: 0 0 auto; + width: 100%; } + .offset-xxl-0 { + margin-left: 0; } + .offset-xxl-1 { + margin-left: 8.33333333%; } + .offset-xxl-2 { + margin-left: 16.66666667%; } + .offset-xxl-3 { + margin-left: 25%; } + .offset-xxl-4 { + margin-left: 33.33333333%; } + .offset-xxl-5 { + margin-left: 41.66666667%; } + .offset-xxl-6 { + margin-left: 50%; } + .offset-xxl-7 { + margin-left: 58.33333333%; } + .offset-xxl-8 
{ + margin-left: 66.66666667%; } + .offset-xxl-9 { + margin-left: 75%; } + .offset-xxl-10 { + margin-left: 83.33333333%; } + .offset-xxl-11 { + margin-left: 91.66666667%; } + .g-xxl-0, + .gx-xxl-0 { + --bs-gutter-x: 0; } + .g-xxl-0, + .gy-xxl-0 { + --bs-gutter-y: 0; } + .g-xxl-1, + .gx-xxl-1 { + --bs-gutter-x: 0.25rem; } + .g-xxl-1, + .gy-xxl-1 { + --bs-gutter-y: 0.25rem; } + .g-xxl-2, + .gx-xxl-2 { + --bs-gutter-x: 0.5rem; } + .g-xxl-2, + .gy-xxl-2 { + --bs-gutter-y: 0.5rem; } + .g-xxl-3, + .gx-xxl-3 { + --bs-gutter-x: 1rem; } + .g-xxl-3, + .gy-xxl-3 { + --bs-gutter-y: 1rem; } + .g-xxl-4, + .gx-xxl-4 { + --bs-gutter-x: 1.5rem; } + .g-xxl-4, + .gy-xxl-4 { + --bs-gutter-y: 1.5rem; } + .g-xxl-5, + .gx-xxl-5 { + --bs-gutter-x: 3rem; } + .g-xxl-5, + .gy-xxl-5 { + --bs-gutter-y: 3rem; } } + +.table, .td-table:not(.td-initial), .td-box table:not(.td-initial) { + --bs-table-color-type: initial; + --bs-table-bg-type: initial; + --bs-table-color-state: initial; + --bs-table-bg-state: initial; + --bs-table-color: var(--bs-emphasis-color); + --bs-table-bg: var(--bs-body-bg); + --bs-table-border-color: var(--bs-border-color); + --bs-table-accent-bg: transparent; + --bs-table-striped-color: var(--bs-emphasis-color); + --bs-table-striped-bg: rgba(var(--bs-emphasis-color-rgb), 0.05); + --bs-table-active-color: var(--bs-emphasis-color); + --bs-table-active-bg: rgba(var(--bs-emphasis-color-rgb), 0.1); + --bs-table-hover-color: var(--bs-emphasis-color); + --bs-table-hover-bg: rgba(var(--bs-emphasis-color-rgb), 0.075); + width: 100%; + margin-bottom: 1rem; + vertical-align: top; + border-color: var(--bs-table-border-color); } + .table > :not(caption) > * > *, .td-table:not(.td-initial) > :not(caption) > * > *, .td-box table:not(.td-initial) > :not(caption) > * > * { + padding: 0.5rem 0.5rem; + color: var(--bs-table-color-state, var(--bs-table-color-type, var(--bs-table-color))); + background-color: var(--bs-table-bg); + border-bottom-width: var(--bs-border-width); + box-shadow: inset 
0 0 0 9999px var(--bs-table-bg-state, var(--bs-table-bg-type, var(--bs-table-accent-bg))); } + .table > tbody, .td-table:not(.td-initial) > tbody, .td-box table:not(.td-initial) > tbody { + vertical-align: inherit; } + .table > thead, .td-table:not(.td-initial) > thead, .td-box table:not(.td-initial) > thead { + vertical-align: bottom; } + +.table-group-divider { + border-top: calc(var(--bs-border-width) * 2) solid currentcolor; } + +.caption-top { + caption-side: top; } + +.table-sm > :not(caption) > * > * { + padding: 0.25rem 0.25rem; } + +.table-bordered > :not(caption) > * { + border-width: var(--bs-border-width) 0; } + .table-bordered > :not(caption) > * > * { + border-width: 0 var(--bs-border-width); } + +.table-borderless > :not(caption) > * > * { + border-bottom-width: 0; } + +.table-borderless > :not(:first-child) { + border-top-width: 0; } + +.table-striped > tbody > tr:nth-of-type(odd) > *, .td-table:not(.td-initial) > tbody > tr:nth-of-type(odd) > *, .td-box table:not(.td-initial) > tbody > tr:nth-of-type(odd) > * { + --bs-table-color-type: var(--bs-table-striped-color); + --bs-table-bg-type: var(--bs-table-striped-bg); } + +.table-striped-columns > :not(caption) > tr > :nth-child(even) { + --bs-table-color-type: var(--bs-table-striped-color); + --bs-table-bg-type: var(--bs-table-striped-bg); } + +.table-active { + --bs-table-color-state: var(--bs-table-active-color); + --bs-table-bg-state: var(--bs-table-active-bg); } + +.table-hover > tbody > tr:hover > * { + --bs-table-color-state: var(--bs-table-hover-color); + --bs-table-bg-state: var(--bs-table-hover-bg); } + +.table-primary { + --bs-table-color: #000; + --bs-table-bg: #d1ebf6; + --bs-table-border-color: #a7bcc5; + --bs-table-striped-bg: #c7dfea; + --bs-table-striped-color: #000; + --bs-table-active-bg: #bcd4dd; + --bs-table-active-color: #000; + --bs-table-hover-bg: #c1d9e4; + --bs-table-hover-color: #000; + color: var(--bs-table-color); + border-color: var(--bs-table-border-color); } + 
+.table-secondary { + --bs-table-color: #000; + --bs-table-bg: #fff5cc; + --bs-table-border-color: #ccc4a3; + --bs-table-striped-bg: #f2e9c2; + --bs-table-striped-color: #000; + --bs-table-active-bg: #e6ddb8; + --bs-table-active-color: #000; + --bs-table-hover-bg: #ece3bd; + --bs-table-hover-color: #000; + color: var(--bs-table-color); + border-color: var(--bs-table-border-color); } + +.table-success { + --bs-table-color: #000; + --bs-table-bg: #deecd0; + --bs-table-border-color: #b2bda6; + --bs-table-striped-bg: #d3e0c6; + --bs-table-striped-color: #000; + --bs-table-active-bg: #c8d4bb; + --bs-table-active-color: #000; + --bs-table-hover-bg: #cddac0; + --bs-table-hover-color: #000; + color: var(--bs-table-color); + border-color: var(--bs-table-border-color); } + +.table-info { + --bs-table-color: #000; + --bs-table-bg: #e0e3e3; + --bs-table-border-color: #b3b6b6; + --bs-table-striped-bg: #d5d8d8; + --bs-table-striped-color: #000; + --bs-table-active-bg: #cacccc; + --bs-table-active-color: #000; + --bs-table-hover-bg: #cfd2d2; + --bs-table-hover-color: #000; + color: var(--bs-table-color); + border-color: var(--bs-table-border-color); } + +.table-warning { + --bs-table-color: #000; + --bs-table-bg: #fbe1de; + --bs-table-border-color: #c9b4b2; + --bs-table-striped-bg: #eed6d3; + --bs-table-striped-color: #000; + --bs-table-active-bg: #e2cbc8; + --bs-table-active-color: #000; + --bs-table-hover-bg: #e8d0cd; + --bs-table-hover-color: #000; + color: var(--bs-table-color); + border-color: var(--bs-table-border-color); } + +.table-danger { + --bs-table-color: #000; + --bs-table-bg: #ffdbdd; + --bs-table-border-color: #ccafb1; + --bs-table-striped-bg: #f2d0d2; + --bs-table-striped-color: #000; + --bs-table-active-bg: #e6c5c7; + --bs-table-active-color: #000; + --bs-table-hover-bg: #eccbcc; + --bs-table-hover-color: #000; + color: var(--bs-table-color); + border-color: var(--bs-table-border-color); } + +.table-light { + --bs-table-color: #000; + --bs-table-bg: #d3f3ee; + 
--bs-table-border-color: #a9c2be; + --bs-table-striped-bg: #c8e7e2; + --bs-table-striped-color: #000; + --bs-table-active-bg: #bedbd6; + --bs-table-active-color: #000; + --bs-table-hover-bg: #c3e1dc; + --bs-table-hover-color: #000; + color: var(--bs-table-color); + border-color: var(--bs-table-border-color); } + +.table-dark { + --bs-table-color: #fff; + --bs-table-bg: #403f4c; + --bs-table-border-color: #666570; + --bs-table-striped-bg: #4a4955; + --bs-table-striped-color: #fff; + --bs-table-active-bg: #53525e; + --bs-table-active-color: #fff; + --bs-table-hover-bg: #4e4d59; + --bs-table-hover-color: #fff; + color: var(--bs-table-color); + border-color: var(--bs-table-border-color); } + +.table-responsive, .td-table:not(.td-initial), .td-box table:not(.td-initial) { + overflow-x: auto; + -webkit-overflow-scrolling: touch; } + +@media (max-width: 575.98px) { + .table-responsive-sm { + overflow-x: auto; + -webkit-overflow-scrolling: touch; } } + +@media (max-width: 767.98px) { + .table-responsive-md { + overflow-x: auto; + -webkit-overflow-scrolling: touch; } } + +@media (max-width: 991.98px) { + .table-responsive-lg { + overflow-x: auto; + -webkit-overflow-scrolling: touch; } } + +@media (max-width: 1199.98px) { + .table-responsive-xl { + overflow-x: auto; + -webkit-overflow-scrolling: touch; } } + +@media (max-width: 1399.98px) { + .table-responsive-xxl { + overflow-x: auto; + -webkit-overflow-scrolling: touch; } } + +.form-label { + margin-bottom: 0.5rem; } + +.col-form-label { + padding-top: calc(0.375rem + var(--bs-border-width)); + padding-bottom: calc(0.375rem + var(--bs-border-width)); + margin-bottom: 0; + font-size: inherit; + line-height: 1.5; } + +.col-form-label-lg { + padding-top: calc(0.5rem + var(--bs-border-width)); + padding-bottom: calc(0.5rem + var(--bs-border-width)); + font-size: 1.25rem; } + +.col-form-label-sm { + padding-top: calc(0.25rem + var(--bs-border-width)); + padding-bottom: calc(0.25rem + var(--bs-border-width)); + font-size: 
0.875rem; } + +.form-text { + margin-top: 0.25rem; + font-size: 0.875em; + color: var(--bs-secondary-color); } + +.form-control { + display: block; + width: 100%; + padding: 0.375rem 0.75rem; + font-size: 1rem; + font-weight: 400; + line-height: 1.5; + color: var(--bs-body-color); + appearance: none; + background-color: var(--bs-body-bg); + background-clip: padding-box; + border: var(--bs-border-width) solid var(--bs-border-color); + border-radius: var(--bs-border-radius); + box-shadow: var(--bs-box-shadow-inset); + transition: border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; } + @media (prefers-reduced-motion: reduce) { + .form-control { + transition: none; } } + .form-control[type="file"] { + overflow: hidden; } + .form-control[type="file"]:not(:disabled):not([readonly]) { + cursor: pointer; } + .form-control:focus { + color: var(--bs-body-color); + background-color: var(--bs-body-bg); + border-color: #8ccee8; + outline: 0; + box-shadow: var(--bs-box-shadow-inset), 0 0 0 0.25rem rgba(24, 157, 208, 0.25); } + .form-control::-webkit-date-and-time-value { + min-width: 85px; + height: 1.5em; + margin: 0; } + .form-control::-webkit-datetime-edit { + display: block; + padding: 0; } + .form-control::placeholder { + color: var(--bs-secondary-color); + opacity: 1; } + .form-control:disabled { + background-color: var(--bs-secondary-bg); + opacity: 1; } + .form-control::file-selector-button { + padding: 0.375rem 0.75rem; + margin: -0.375rem -0.75rem; + margin-inline-end: 0.75rem; + color: var(--bs-body-color); + background-color: var(--bs-tertiary-bg); + background-image: var(--bs-gradient); + pointer-events: none; + border-color: inherit; + border-style: solid; + border-width: 0; + border-inline-end-width: var(--bs-border-width); + border-radius: 0; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; } + @media (prefers-reduced-motion: reduce) { + 
.form-control::file-selector-button { + transition: none; } } + .form-control:hover:not(:disabled):not([readonly])::file-selector-button { + background-color: var(--bs-secondary-bg); } + +.form-control-plaintext { + display: block; + width: 100%; + padding: 0.375rem 0; + margin-bottom: 0; + line-height: 1.5; + color: var(--bs-body-color); + background-color: transparent; + border: solid transparent; + border-width: var(--bs-border-width) 0; } + .form-control-plaintext:focus { + outline: 0; } + .form-control-plaintext.form-control-sm, .form-control-plaintext.form-control-lg { + padding-right: 0; + padding-left: 0; } + +.form-control-sm { + min-height: calc(1.5em + 0.5rem + calc(var(--bs-border-width) * 2)); + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + border-radius: var(--bs-border-radius-sm); } + .form-control-sm::file-selector-button { + padding: 0.25rem 0.5rem; + margin: -0.25rem -0.5rem; + margin-inline-end: 0.5rem; } + +.form-control-lg { + min-height: calc(1.5em + 1rem + calc(var(--bs-border-width) * 2)); + padding: 0.5rem 1rem; + font-size: 1.25rem; + border-radius: var(--bs-border-radius-lg); } + .form-control-lg::file-selector-button { + padding: 0.5rem 1rem; + margin: -0.5rem -1rem; + margin-inline-end: 1rem; } + +textarea.form-control { + min-height: calc(1.5em + 0.75rem + calc(var(--bs-border-width) * 2)); } + +textarea.form-control-sm { + min-height: calc(1.5em + 0.5rem + calc(var(--bs-border-width) * 2)); } + +textarea.form-control-lg { + min-height: calc(1.5em + 1rem + calc(var(--bs-border-width) * 2)); } + +.form-control-color { + width: 3rem; + height: calc(1.5em + 0.75rem + calc(var(--bs-border-width) * 2)); + padding: 0.375rem; } + .form-control-color:not(:disabled):not([readonly]) { + cursor: pointer; } + .form-control-color::-moz-color-swatch { + border: 0 !important; + border-radius: var(--bs-border-radius); } + .form-control-color::-webkit-color-swatch { + border: 0 !important; + border-radius: var(--bs-border-radius); } + 
.form-control-color.form-control-sm { + height: calc(1.5em + 0.5rem + calc(var(--bs-border-width) * 2)); } + .form-control-color.form-control-lg { + height: calc(1.5em + 1rem + calc(var(--bs-border-width) * 2)); } + +.form-select { + --bs-form-select-bg-img: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23343a40' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='m2 5 6 6 6-6'/%3e%3c/svg%3e"); + display: block; + width: 100%; + padding: 0.375rem 2.25rem 0.375rem 0.75rem; + font-size: 1rem; + font-weight: 400; + line-height: 1.5; + color: var(--bs-body-color); + appearance: none; + background-color: var(--bs-body-bg); + background-image: var(--bs-form-select-bg-img), var(--bs-form-select-bg-icon, none); + background-repeat: no-repeat; + background-position: right 0.75rem center; + background-size: 16px 12px; + border: var(--bs-border-width) solid var(--bs-border-color); + border-radius: var(--bs-border-radius); + box-shadow: var(--bs-box-shadow-inset); + transition: border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; } + @media (prefers-reduced-motion: reduce) { + .form-select { + transition: none; } } + .form-select:focus { + border-color: #8ccee8; + outline: 0; + box-shadow: var(--bs-box-shadow-inset), 0 0 0 0.25rem rgba(24, 157, 208, 0.25); } + .form-select[multiple], .form-select[size]:not([size="1"]) { + padding-right: 0.75rem; + background-image: none; } + .form-select:disabled { + background-color: var(--bs-secondary-bg); } + .form-select:-moz-focusring { + color: transparent; + text-shadow: 0 0 0 var(--bs-body-color); } + +.form-select-sm { + padding-top: 0.25rem; + padding-bottom: 0.25rem; + padding-left: 0.5rem; + font-size: 0.875rem; + border-radius: var(--bs-border-radius-sm); } + +.form-select-lg { + padding-top: 0.5rem; + padding-bottom: 0.5rem; + padding-left: 1rem; + font-size: 1.25rem; + border-radius: var(--bs-border-radius-lg); } + 
+[data-bs-theme="dark"] .form-select { + --bs-form-select-bg-img: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16'%3e%3cpath fill='none' stroke='%23dee2e6' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='m2 5 6 6 6-6'/%3e%3c/svg%3e"); } + +.form-check { + display: block; + min-height: 1.5rem; + padding-left: 1.5em; + margin-bottom: 0.125rem; } + .form-check .form-check-input { + float: left; + margin-left: -1.5em; } + +.form-check-reverse { + padding-right: 1.5em; + padding-left: 0; + text-align: right; } + .form-check-reverse .form-check-input { + float: right; + margin-right: -1.5em; + margin-left: 0; } + +.form-check-input { + --bs-form-check-bg: var(--bs-body-bg); + flex-shrink: 0; + width: 1em; + height: 1em; + margin-top: 0.25em; + vertical-align: top; + appearance: none; + background-color: var(--bs-form-check-bg); + background-image: var(--bs-form-check-bg-image); + background-repeat: no-repeat; + background-position: center; + background-size: contain; + border: var(--bs-border-width) solid var(--bs-border-color); + print-color-adjust: exact; } + .form-check-input[type="checkbox"] { + border-radius: 0.25em; } + .form-check-input[type="radio"] { + border-radius: 50%; } + .form-check-input:active { + filter: brightness(90%); } + .form-check-input:focus { + border-color: #8ccee8; + outline: 0; + box-shadow: 0 0 0 0.25rem rgba(24, 157, 208, 0.25); } + .form-check-input:checked { + background-color: #189DD0; + border-color: #189DD0; } + .form-check-input:checked[type="checkbox"] { + --bs-form-check-bg-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='m6 10 3 3 6-6'/%3e%3c/svg%3e"), var(--bs-gradient); } + .form-check-input:checked[type="radio"] { + --bs-form-check-bg-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 
8'%3e%3ccircle r='2' fill='%23fff'/%3e%3c/svg%3e"), var(--bs-gradient); } + .form-check-input[type="checkbox"]:indeterminate { + background-color: #189DD0; + border-color: #189DD0; + --bs-form-check-bg-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 20 20'%3e%3cpath fill='none' stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='3' d='M6 10h8'/%3e%3c/svg%3e"), var(--bs-gradient); } + .form-check-input:disabled { + pointer-events: none; + filter: none; + opacity: 0.5; } + .form-check-input[disabled] ~ .form-check-label, .form-check-input:disabled ~ .form-check-label { + cursor: default; + opacity: 0.5; } + +.form-switch { + padding-left: 2.5em; } + .form-switch .form-check-input { + --bs-form-switch-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='rgba%280, 0, 0, 0.25%29'/%3e%3c/svg%3e"); + width: 2em; + margin-left: -2.5em; + background-image: var(--bs-form-switch-bg); + background-position: left center; + border-radius: 2em; + transition: background-position 0.15s ease-in-out; } + @media (prefers-reduced-motion: reduce) { + .form-switch .form-check-input { + transition: none; } } + .form-switch .form-check-input:focus { + --bs-form-switch-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%238ccee8'/%3e%3c/svg%3e"); } + .form-switch .form-check-input:checked { + background-position: right center; + --bs-form-switch-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%23fff'/%3e%3c/svg%3e"), var(--bs-gradient); } + .form-switch.form-check-reverse { + padding-right: 2.5em; + padding-left: 0; } + .form-switch.form-check-reverse .form-check-input { + margin-right: -2.5em; + margin-left: 0; } + +.form-check-inline { + display: inline-block; + margin-right: 1rem; } + +.btn-check { + position: absolute; + clip: rect(0, 0, 
0, 0); + pointer-events: none; } + .btn-check[disabled] + .btn, div.drawio .btn-check[disabled] + button, .td-blog .btn-check[disabled] + .td-rss-button, .btn-check:disabled + .btn, div.drawio .btn-check:disabled + button, .td-blog .btn-check:disabled + .td-rss-button { + pointer-events: none; + filter: none; + opacity: 0.65; } + +[data-bs-theme="dark"] .form-switch .form-check-input:not(:checked):not(:focus) { + --bs-form-switch-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='rgba%28255, 255, 255, 0.25%29'/%3e%3c/svg%3e"); } + +.form-range { + width: 100%; + height: 1.5rem; + padding: 0; + appearance: none; + background-color: transparent; } + .form-range:focus { + outline: 0; } + .form-range:focus::-webkit-slider-thumb { + box-shadow: 0 0 0 1px #fff, 0 0 0 0.25rem rgba(24, 157, 208, 0.25); } + .form-range:focus::-moz-range-thumb { + box-shadow: 0 0 0 1px #fff, 0 0 0 0.25rem rgba(24, 157, 208, 0.25); } + .form-range::-moz-focus-outer { + border: 0; } + .form-range::-webkit-slider-thumb { + width: 1rem; + height: 1rem; + margin-top: -0.25rem; + appearance: none; + background-color: #189DD0; + background-image: var(--bs-gradient); + border: 0; + border-radius: 1rem; + box-shadow: 0 0.1rem 0.25rem rgba(0, 0, 0, 0.1); + transition: background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; } + @media (prefers-reduced-motion: reduce) { + .form-range::-webkit-slider-thumb { + transition: none; } } + .form-range::-webkit-slider-thumb:active { + background-color: #bae2f1; + background-image: var(--bs-gradient); } + .form-range::-webkit-slider-runnable-track { + width: 100%; + height: 0.5rem; + color: transparent; + cursor: pointer; + background-color: var(--bs-secondary-bg); + border-color: transparent; + border-radius: 1rem; + box-shadow: var(--bs-box-shadow-inset); } + .form-range::-moz-range-thumb { + width: 1rem; + height: 1rem; + appearance: none; + 
background-color: #189DD0; + background-image: var(--bs-gradient); + border: 0; + border-radius: 1rem; + box-shadow: 0 0.1rem 0.25rem rgba(0, 0, 0, 0.1); + transition: background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; } + @media (prefers-reduced-motion: reduce) { + .form-range::-moz-range-thumb { + transition: none; } } + .form-range::-moz-range-thumb:active { + background-color: #bae2f1; + background-image: var(--bs-gradient); } + .form-range::-moz-range-track { + width: 100%; + height: 0.5rem; + color: transparent; + cursor: pointer; + background-color: var(--bs-secondary-bg); + border-color: transparent; + border-radius: 1rem; + box-shadow: var(--bs-box-shadow-inset); } + .form-range:disabled { + pointer-events: none; } + .form-range:disabled::-webkit-slider-thumb { + background-color: var(--bs-secondary-color); } + .form-range:disabled::-moz-range-thumb { + background-color: var(--bs-secondary-color); } + +.form-floating { + position: relative; } + .form-floating > .form-control, + .form-floating > .form-control-plaintext, + .form-floating > .form-select { + height: calc(3.5rem + calc(var(--bs-border-width) * 2)); + min-height: calc(3.5rem + calc(var(--bs-border-width) * 2)); + line-height: 1.25; } + .form-floating > label { + position: absolute; + top: 0; + left: 0; + z-index: 2; + height: 100%; + padding: 1rem 0.75rem; + overflow: hidden; + text-align: start; + text-overflow: ellipsis; + white-space: nowrap; + pointer-events: none; + border: var(--bs-border-width) solid transparent; + transform-origin: 0 0; + transition: opacity 0.1s ease-in-out, transform 0.1s ease-in-out; } + @media (prefers-reduced-motion: reduce) { + .form-floating > label { + transition: none; } } + .form-floating > .form-control, + .form-floating > .form-control-plaintext { + padding: 1rem 0.75rem; } + .form-floating > .form-control::placeholder, + .form-floating > .form-control-plaintext::placeholder { + color: transparent; } + 
.form-floating > .form-control:focus, .form-floating > .form-control:not(:placeholder-shown), + .form-floating > .form-control-plaintext:focus, + .form-floating > .form-control-plaintext:not(:placeholder-shown) { + padding-top: 1.625rem; + padding-bottom: 0.625rem; } + .form-floating > .form-control:-webkit-autofill, + .form-floating > .form-control-plaintext:-webkit-autofill { + padding-top: 1.625rem; + padding-bottom: 0.625rem; } + .form-floating > .form-select { + padding-top: 1.625rem; + padding-bottom: 0.625rem; } + .form-floating > .form-control:focus ~ label, + .form-floating > .form-control:not(:placeholder-shown) ~ label, + .form-floating > .form-control-plaintext ~ label, + .form-floating > .form-select ~ label { + color: rgba(var(--bs-body-color-rgb), 0.65); + transform: scale(0.85) translateY(-0.5rem) translateX(0.15rem); } + .form-floating > .form-control:focus ~ label::after, + .form-floating > .form-control:not(:placeholder-shown) ~ label::after, + .form-floating > .form-control-plaintext ~ label::after, + .form-floating > .form-select ~ label::after { + position: absolute; + inset: 1rem 0.375rem; + z-index: -1; + height: 1.5em; + content: ""; + background-color: var(--bs-body-bg); + border-radius: var(--bs-border-radius); } + .form-floating > .form-control:-webkit-autofill ~ label { + color: rgba(var(--bs-body-color-rgb), 0.65); + transform: scale(0.85) translateY(-0.5rem) translateX(0.15rem); } + .form-floating > .form-control-plaintext ~ label { + border-width: var(--bs-border-width) 0; } + .form-floating > :disabled ~ label, + .form-floating > .form-control:disabled ~ label { + color: #6c757d; } + .form-floating > :disabled ~ label::after, + .form-floating > .form-control:disabled ~ label::after { + background-color: var(--bs-secondary-bg); } + +.input-group { + position: relative; + display: flex; + flex-wrap: wrap; + align-items: stretch; + width: 100%; } + .input-group > .form-control, + .input-group > .form-select, + .input-group > 
.form-floating { + position: relative; + flex: 1 1 auto; + width: 1%; + min-width: 0; } + .input-group > .form-control:focus, + .input-group > .form-select:focus, + .input-group > .form-floating:focus-within { + z-index: 5; } + .input-group .btn, .input-group div.drawio button, div.drawio .input-group button, .input-group .td-blog .td-rss-button, .td-blog .input-group .td-rss-button { + position: relative; + z-index: 2; } + .input-group .btn:focus, .input-group div.drawio button:focus, div.drawio .input-group button:focus, .input-group .td-blog .td-rss-button:focus, .td-blog .input-group .td-rss-button:focus { + z-index: 5; } + +.input-group-text { + display: flex; + align-items: center; + padding: 0.375rem 0.75rem; + font-size: 1rem; + font-weight: 400; + line-height: 1.5; + color: var(--bs-body-color); + text-align: center; + white-space: nowrap; + background-color: var(--bs-tertiary-bg); + border: var(--bs-border-width) solid var(--bs-border-color); + border-radius: var(--bs-border-radius); } + +.input-group-lg > .form-control, +.input-group-lg > .form-select, +.input-group-lg > .input-group-text, +.input-group-lg > .btn, +div.drawio .input-group-lg > button, +.td-blog .input-group-lg > .td-rss-button { + padding: 0.5rem 1rem; + font-size: 1.25rem; + border-radius: var(--bs-border-radius-lg); } + +.input-group-sm > .form-control, +.input-group-sm > .form-select, +.input-group-sm > .input-group-text, +.input-group-sm > .btn, +div.drawio .input-group-sm > button, +.td-blog .input-group-sm > .td-rss-button { + padding: 0.25rem 0.5rem; + font-size: 0.875rem; + border-radius: var(--bs-border-radius-sm); } + +.input-group-lg > .form-select, +.input-group-sm > .form-select { + padding-right: 3rem; } + +.input-group:not(.has-validation) > :not(:last-child):not(.dropdown-toggle):not(.dropdown-menu):not(.form-floating), +.input-group:not(.has-validation) > .dropdown-toggle:nth-last-child(n + 3), +.input-group:not(.has-validation) > .form-floating:not(:last-child) > 
.form-control, +.input-group:not(.has-validation) > .form-floating:not(:last-child) > .form-select { + border-top-right-radius: 0; + border-bottom-right-radius: 0; } + +.input-group.has-validation > :nth-last-child(n + 3):not(.dropdown-toggle):not(.dropdown-menu):not(.form-floating), +.input-group.has-validation > .dropdown-toggle:nth-last-child(n + 4), +.input-group.has-validation > .form-floating:nth-last-child(n + 3) > .form-control, +.input-group.has-validation > .form-floating:nth-last-child(n + 3) > .form-select { + border-top-right-radius: 0; + border-bottom-right-radius: 0; } + +.input-group > :not(:first-child):not(.dropdown-menu):not(.valid-tooltip):not(.valid-feedback):not(.invalid-tooltip):not(.invalid-feedback) { + margin-left: calc(var(--bs-border-width) * -1); + border-top-left-radius: 0; + border-bottom-left-radius: 0; } + +.input-group > .form-floating:not(:first-child) > .form-control, +.input-group > .form-floating:not(:first-child) > .form-select { + border-top-left-radius: 0; + border-bottom-left-radius: 0; } + +.valid-feedback { + display: none; + width: 100%; + margin-top: 0.25rem; + font-size: 0.875em; + color: var(--bs-form-valid-color); } + +.valid-tooltip { + position: absolute; + top: 100%; + z-index: 5; + display: none; + max-width: 100%; + padding: 0.25rem 0.5rem; + margin-top: .1rem; + font-size: 0.875rem; + color: #fff; + background-color: var(--bs-success); + border-radius: var(--bs-border-radius); } + +.was-validated :valid ~ .valid-feedback, +.was-validated :valid ~ .valid-tooltip, +.is-valid ~ .valid-feedback, +.is-valid ~ .valid-tooltip { + display: block; } + +.was-validated .form-control:valid, .form-control.is-valid { + border-color: var(--bs-form-valid-border-color); + padding-right: calc(1.5em + 0.75rem); + background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%235ca012' d='M2.3 6.73.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 
4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e"); + background-repeat: no-repeat; + background-position: right calc(0.375em + 0.1875rem) center; + background-size: calc(0.75em + 0.375rem) calc(0.75em + 0.375rem); } + .was-validated .form-control:valid:focus, .form-control.is-valid:focus { + border-color: var(--bs-form-valid-border-color); + box-shadow: var(--bs-box-shadow-inset), 0 0 0 0.25rem rgba(var(--bs-success-rgb), 0.25); } + +.was-validated textarea.form-control:valid, textarea.form-control.is-valid { + padding-right: calc(1.5em + 0.75rem); + background-position: top calc(0.375em + 0.1875rem) right calc(0.375em + 0.1875rem); } + +.was-validated .form-select:valid, .form-select.is-valid { + border-color: var(--bs-form-valid-border-color); } + .was-validated .form-select:valid:not([multiple]):not([size]), .was-validated .form-select:valid:not([multiple])[size="1"], .form-select.is-valid:not([multiple]):not([size]), .form-select.is-valid:not([multiple])[size="1"] { + --bs-form-select-bg-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%235ca012' d='M2.3 6.73.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e"); + padding-right: 4.125rem; + background-position: right 0.75rem center, center right 2.25rem; + background-size: 16px 12px, calc(0.75em + 0.375rem) calc(0.75em + 0.375rem); } + .was-validated .form-select:valid:focus, .form-select.is-valid:focus { + border-color: var(--bs-form-valid-border-color); + box-shadow: var(--bs-box-shadow-inset), 0 0 0 0.25rem rgba(var(--bs-success-rgb), 0.25); } + +.was-validated .form-control-color:valid, .form-control-color.is-valid { + width: calc(3rem + calc(1.5em + 0.75rem)); } + +.was-validated .form-check-input:valid, .form-check-input.is-valid { + border-color: var(--bs-form-valid-border-color); } + .was-validated .form-check-input:valid:checked, .form-check-input.is-valid:checked { + background-color: 
var(--bs-form-valid-color); } + .was-validated .form-check-input:valid:focus, .form-check-input.is-valid:focus { + box-shadow: 0 0 0 0.25rem rgba(var(--bs-success-rgb), 0.25); } + .was-validated .form-check-input:valid ~ .form-check-label, .form-check-input.is-valid ~ .form-check-label { + color: var(--bs-form-valid-color); } + +.form-check-inline .form-check-input ~ .valid-feedback { + margin-left: .5em; } + +.was-validated .input-group > .form-control:not(:focus):valid, .input-group > .form-control:not(:focus).is-valid, .was-validated .input-group > .form-select:not(:focus):valid, +.input-group > .form-select:not(:focus).is-valid, .was-validated .input-group > .form-floating:not(:focus-within):valid, +.input-group > .form-floating:not(:focus-within).is-valid { + z-index: 3; } + +.invalid-feedback { + display: none; + width: 100%; + margin-top: 0.25rem; + font-size: 0.875em; + color: var(--bs-form-invalid-color); } + +.invalid-tooltip { + position: absolute; + top: 100%; + z-index: 5; + display: none; + max-width: 100%; + padding: 0.25rem 0.5rem; + margin-top: .1rem; + font-size: 0.875rem; + color: #fff; + background-color: var(--bs-danger); + border-radius: var(--bs-border-radius); } + +.was-validated :invalid ~ .invalid-feedback, +.was-validated :invalid ~ .invalid-tooltip, +.is-invalid ~ .invalid-feedback, +.is-invalid ~ .invalid-tooltip { + display: block; } + +.was-validated .form-control:invalid, .form-control.is-invalid { + border-color: var(--bs-form-invalid-border-color); + padding-right: calc(1.5em + 0.75rem); + background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23fe4954'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23fe4954' stroke='none'/%3e%3c/svg%3e"); + background-repeat: no-repeat; + background-position: right calc(0.375em + 0.1875rem) center; + background-size: 
calc(0.75em + 0.375rem) calc(0.75em + 0.375rem); } + .was-validated .form-control:invalid:focus, .form-control.is-invalid:focus { + border-color: var(--bs-form-invalid-border-color); + box-shadow: var(--bs-box-shadow-inset), 0 0 0 0.25rem rgba(var(--bs-danger-rgb), 0.25); } + +.was-validated textarea.form-control:invalid, textarea.form-control.is-invalid { + padding-right: calc(1.5em + 0.75rem); + background-position: top calc(0.375em + 0.1875rem) right calc(0.375em + 0.1875rem); } + +.was-validated .form-select:invalid, .form-select.is-invalid { + border-color: var(--bs-form-invalid-border-color); } + .was-validated .form-select:invalid:not([multiple]):not([size]), .was-validated .form-select:invalid:not([multiple])[size="1"], .form-select.is-invalid:not([multiple]):not([size]), .form-select.is-invalid:not([multiple])[size="1"] { + --bs-form-select-bg-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 12 12' width='12' height='12' fill='none' stroke='%23fe4954'%3e%3ccircle cx='6' cy='6' r='4.5'/%3e%3cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3e%3ccircle cx='6' cy='8.2' r='.6' fill='%23fe4954' stroke='none'/%3e%3c/svg%3e"); + padding-right: 4.125rem; + background-position: right 0.75rem center, center right 2.25rem; + background-size: 16px 12px, calc(0.75em + 0.375rem) calc(0.75em + 0.375rem); } + .was-validated .form-select:invalid:focus, .form-select.is-invalid:focus { + border-color: var(--bs-form-invalid-border-color); + box-shadow: var(--bs-box-shadow-inset), 0 0 0 0.25rem rgba(var(--bs-danger-rgb), 0.25); } + +.was-validated .form-control-color:invalid, .form-control-color.is-invalid { + width: calc(3rem + calc(1.5em + 0.75rem)); } + +.was-validated .form-check-input:invalid, .form-check-input.is-invalid { + border-color: var(--bs-form-invalid-border-color); } + .was-validated .form-check-input:invalid:checked, .form-check-input.is-invalid:checked { + background-color: var(--bs-form-invalid-color); } + 
.was-validated .form-check-input:invalid:focus, .form-check-input.is-invalid:focus { + box-shadow: 0 0 0 0.25rem rgba(var(--bs-danger-rgb), 0.25); } + .was-validated .form-check-input:invalid ~ .form-check-label, .form-check-input.is-invalid ~ .form-check-label { + color: var(--bs-form-invalid-color); } + +.form-check-inline .form-check-input ~ .invalid-feedback { + margin-left: .5em; } + +.was-validated .input-group > .form-control:not(:focus):invalid, .input-group > .form-control:not(:focus).is-invalid, .was-validated .input-group > .form-select:not(:focus):invalid, +.input-group > .form-select:not(:focus).is-invalid, .was-validated .input-group > .form-floating:not(:focus-within):invalid, +.input-group > .form-floating:not(:focus-within).is-invalid { + z-index: 4; } + +.btn, div.drawio button, .td-blog .td-rss-button { + --bs-btn-padding-x: 0.75rem; + --bs-btn-padding-y: 0.375rem; + --bs-btn-font-family: ; + --bs-btn-font-size: 1rem; + --bs-btn-font-weight: 400; + --bs-btn-line-height: 1.5; + --bs-btn-color: var(--bs-body-color); + --bs-btn-bg: transparent; + --bs-btn-border-width: var(--bs-border-width); + --bs-btn-border-color: transparent; + --bs-btn-border-radius: var(--bs-border-radius); + --bs-btn-hover-border-color: transparent; + --bs-btn-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075); + --bs-btn-disabled-opacity: 0.65; + --bs-btn-focus-box-shadow: 0 0 0 0.25rem rgba(var(--bs-btn-focus-shadow-rgb), .5); + display: inline-block; + padding: var(--bs-btn-padding-y) var(--bs-btn-padding-x); + font-family: var(--bs-btn-font-family); + font-size: var(--bs-btn-font-size); + font-weight: var(--bs-btn-font-weight); + line-height: var(--bs-btn-line-height); + color: var(--bs-btn-color); + text-align: center; + text-decoration: none; + vertical-align: middle; + cursor: pointer; + user-select: none; + border: var(--bs-btn-border-width) solid var(--bs-btn-border-color); + border-radius: var(--bs-btn-border-radius); + 
background-color: var(--bs-btn-bg); + background-image: var(--bs-gradient); + box-shadow: var(--bs-btn-box-shadow); + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; } + @media (prefers-reduced-motion: reduce) { + .btn, div.drawio button, .td-blog .td-rss-button { + transition: none; } } + .btn:hover, div.drawio button:hover, .td-blog .td-rss-button:hover { + color: var(--bs-btn-hover-color); + background-color: var(--bs-btn-hover-bg); + border-color: var(--bs-btn-hover-border-color); } + .btn-check + .btn:hover, div.drawio .btn-check + button:hover, .td-blog .btn-check + .td-rss-button:hover { + color: var(--bs-btn-color); + background-color: var(--bs-btn-bg); + border-color: var(--bs-btn-border-color); } + .btn:focus-visible, div.drawio button:focus-visible, .td-blog .td-rss-button:focus-visible { + color: var(--bs-btn-hover-color); + background-color: var(--bs-btn-hover-bg); + background-image: var(--bs-gradient); + border-color: var(--bs-btn-hover-border-color); + outline: 0; + box-shadow: var(--bs-btn-box-shadow), var(--bs-btn-focus-box-shadow); } + .btn-check:focus-visible + .btn, div.drawio .btn-check:focus-visible + button, .td-blog .btn-check:focus-visible + .td-rss-button { + border-color: var(--bs-btn-hover-border-color); + outline: 0; + box-shadow: var(--bs-btn-box-shadow), var(--bs-btn-focus-box-shadow); } + .btn-check:checked + .btn, div.drawio .btn-check:checked + button, .td-blog .btn-check:checked + .td-rss-button, :not(.btn-check) + .btn:active, div.drawio :not(.btn-check) + button:active, .td-blog :not(.btn-check) + .td-rss-button:active, .btn:first-child:active, div.drawio button:first-child:active, .td-blog .td-rss-button:first-child:active, .btn.active, div.drawio button.active, .td-blog .active.td-rss-button, .btn.show, div.drawio button.show, .td-blog .show.td-rss-button { + color: var(--bs-btn-active-color); + background-color: var(--bs-btn-active-bg); + 
background-image: none; + border-color: var(--bs-btn-active-border-color); + box-shadow: var(--bs-btn-active-shadow); } + .btn-check:checked + .btn:focus-visible, div.drawio .btn-check:checked + button:focus-visible, .td-blog .btn-check:checked + .td-rss-button:focus-visible, :not(.btn-check) + .btn:active:focus-visible, div.drawio :not(.btn-check) + button:active:focus-visible, .td-blog :not(.btn-check) + .td-rss-button:active:focus-visible, .btn:first-child:active:focus-visible, div.drawio button:first-child:active:focus-visible, .td-blog .td-rss-button:first-child:active:focus-visible, .btn.active:focus-visible, div.drawio button.active:focus-visible, .td-blog .active.td-rss-button:focus-visible, .btn.show:focus-visible, div.drawio button.show:focus-visible, .td-blog .show.td-rss-button:focus-visible { + box-shadow: var(--bs-btn-active-shadow), var(--bs-btn-focus-box-shadow); } + .btn-check:checked:focus-visible + .btn, div.drawio .btn-check:checked:focus-visible + button, .td-blog .btn-check:checked:focus-visible + .td-rss-button { + box-shadow: var(--bs-btn-active-shadow), var(--bs-btn-focus-box-shadow); } + .btn:disabled, div.drawio button:disabled, .td-blog .td-rss-button:disabled, .btn.disabled, div.drawio button.disabled, .td-blog .disabled.td-rss-button, fieldset:disabled .btn, fieldset:disabled div.drawio button, div.drawio fieldset:disabled button, fieldset:disabled .td-blog .td-rss-button, .td-blog fieldset:disabled .td-rss-button { + color: var(--bs-btn-disabled-color); + pointer-events: none; + background-color: var(--bs-btn-disabled-bg); + background-image: none; + border-color: var(--bs-btn-disabled-border-color); + opacity: var(--bs-btn-disabled-opacity); + box-shadow: none; } + +.btn-primary { + --bs-btn-color: #000; + --bs-btn-bg: #189DD0; + --bs-btn-border-color: #189DD0; + --bs-btn-hover-color: #000; + --bs-btn-hover-bg: #3bacd7; + --bs-btn-hover-border-color: #2fa7d5; + --bs-btn-focus-shadow-rgb: 20, 133, 177; + --bs-btn-active-color: #000; + 
--bs-btn-active-bg: #46b1d9; + --bs-btn-active-border-color: #2fa7d5; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #000; + --bs-btn-disabled-bg: #189DD0; + --bs-btn-disabled-border-color: #189DD0; } + +.btn-secondary { + --bs-btn-color: #000; + --bs-btn-bg: #ffcc00; + --bs-btn-border-color: #ffcc00; + --bs-btn-hover-color: #000; + --bs-btn-hover-bg: #ffd426; + --bs-btn-hover-border-color: #ffd11a; + --bs-btn-focus-shadow-rgb: 217, 173, 0; + --bs-btn-active-color: #000; + --bs-btn-active-bg: #ffd633; + --bs-btn-active-border-color: #ffd11a; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #000; + --bs-btn-disabled-bg: #ffcc00; + --bs-btn-disabled-border-color: #ffcc00; } + +.btn-success { + --bs-btn-color: #000; + --bs-btn-bg: #5ca012; + --bs-btn-border-color: #5ca012; + --bs-btn-hover-color: #000; + --bs-btn-hover-bg: #74ae36; + --bs-btn-hover-border-color: #6caa2a; + --bs-btn-focus-shadow-rgb: 78, 136, 15; + --bs-btn-active-color: #000; + --bs-btn-active-bg: #7db341; + --bs-btn-active-border-color: #6caa2a; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #000; + --bs-btn-disabled-bg: #5ca012; + --bs-btn-disabled-border-color: #5ca012; } + +.btn-info, .td-blog .td-rss-button { + --bs-btn-color: #fff; + --bs-btn-bg: #667373; + --bs-btn-border-color: #667373; + --bs-btn-hover-color: #fff; + --bs-btn-hover-bg: #576262; + --bs-btn-hover-border-color: #525c5c; + --bs-btn-focus-shadow-rgb: 125, 136, 136; + --bs-btn-active-color: #fff; + --bs-btn-active-bg: #525c5c; + --bs-btn-active-border-color: #4d5656; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #fff; + --bs-btn-disabled-bg: #667373; + --bs-btn-disabled-border-color: #667373; } + +.btn-warning { + --bs-btn-color: #000; + --bs-btn-bg: #ed6a5a; + --bs-btn-border-color: #ed6a5a; + --bs-btn-hover-color: #000; + --bs-btn-hover-bg: #f08073; + 
--bs-btn-hover-border-color: #ef796b; + --bs-btn-focus-shadow-rgb: 201, 90, 77; + --bs-btn-active-color: #000; + --bs-btn-active-bg: #f1887b; + --bs-btn-active-border-color: #ef796b; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #000; + --bs-btn-disabled-bg: #ed6a5a; + --bs-btn-disabled-border-color: #ed6a5a; } + +.btn-danger { + --bs-btn-color: #000; + --bs-btn-bg: #fe4954; + --bs-btn-border-color: #fe4954; + --bs-btn-hover-color: #000; + --bs-btn-hover-bg: #fe646e; + --bs-btn-hover-border-color: #fe5b65; + --bs-btn-focus-shadow-rgb: 216, 62, 71; + --bs-btn-active-color: #000; + --bs-btn-active-bg: #fe6d76; + --bs-btn-active-border-color: #fe5b65; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #000; + --bs-btn-disabled-bg: #fe4954; + --bs-btn-disabled-border-color: #fe4954; } + +.btn-light { + --bs-btn-color: #000; + --bs-btn-bg: #d3f3ee; + --bs-btn-border-color: #d3f3ee; + --bs-btn-hover-color: #000; + --bs-btn-hover-bg: #b3cfca; + --bs-btn-hover-border-color: #a9c2be; + --bs-btn-focus-shadow-rgb: 179, 207, 202; + --bs-btn-active-color: #000; + --bs-btn-active-bg: #a9c2be; + --bs-btn-active-border-color: #9eb6b3; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #000; + --bs-btn-disabled-bg: #d3f3ee; + --bs-btn-disabled-border-color: #d3f3ee; } + +.btn-dark { + --bs-btn-color: #fff; + --bs-btn-bg: #403f4c; + --bs-btn-border-color: #403f4c; + --bs-btn-hover-color: #fff; + --bs-btn-hover-bg: #5d5c67; + --bs-btn-hover-border-color: #53525e; + --bs-btn-focus-shadow-rgb: 93, 92, 103; + --bs-btn-active-color: #fff; + --bs-btn-active-bg: #666570; + --bs-btn-active-border-color: #53525e; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #fff; + --bs-btn-disabled-bg: #403f4c; + --bs-btn-disabled-border-color: #403f4c; } + +.btn-outline-primary, div.drawio button { + --bs-btn-color: #189DD0; + 
--bs-btn-border-color: #189DD0; + --bs-btn-hover-color: #000; + --bs-btn-hover-bg: #189DD0; + --bs-btn-hover-border-color: #189DD0; + --bs-btn-focus-shadow-rgb: 24, 157, 208; + --bs-btn-active-color: #000; + --bs-btn-active-bg: #189DD0; + --bs-btn-active-border-color: #189DD0; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #189DD0; + --bs-btn-disabled-bg: transparent; + --bs-btn-disabled-border-color: #189DD0; + --bs-gradient: none; } + +.btn-outline-secondary { + --bs-btn-color: #ffcc00; + --bs-btn-border-color: #ffcc00; + --bs-btn-hover-color: #000; + --bs-btn-hover-bg: #ffcc00; + --bs-btn-hover-border-color: #ffcc00; + --bs-btn-focus-shadow-rgb: 255, 204, 0; + --bs-btn-active-color: #000; + --bs-btn-active-bg: #ffcc00; + --bs-btn-active-border-color: #ffcc00; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #ffcc00; + --bs-btn-disabled-bg: transparent; + --bs-btn-disabled-border-color: #ffcc00; + --bs-gradient: none; } + +.btn-outline-success { + --bs-btn-color: #5ca012; + --bs-btn-border-color: #5ca012; + --bs-btn-hover-color: #000; + --bs-btn-hover-bg: #5ca012; + --bs-btn-hover-border-color: #5ca012; + --bs-btn-focus-shadow-rgb: 92, 160, 18; + --bs-btn-active-color: #000; + --bs-btn-active-bg: #5ca012; + --bs-btn-active-border-color: #5ca012; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #5ca012; + --bs-btn-disabled-bg: transparent; + --bs-btn-disabled-border-color: #5ca012; + --bs-gradient: none; } + +.btn-outline-info { + --bs-btn-color: #667373; + --bs-btn-border-color: #667373; + --bs-btn-hover-color: #fff; + --bs-btn-hover-bg: #667373; + --bs-btn-hover-border-color: #667373; + --bs-btn-focus-shadow-rgb: 102, 115, 115; + --bs-btn-active-color: #fff; + --bs-btn-active-bg: #667373; + --bs-btn-active-border-color: #667373; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #667373; + 
--bs-btn-disabled-bg: transparent; + --bs-btn-disabled-border-color: #667373; + --bs-gradient: none; } + +.btn-outline-warning { + --bs-btn-color: #ed6a5a; + --bs-btn-border-color: #ed6a5a; + --bs-btn-hover-color: #000; + --bs-btn-hover-bg: #ed6a5a; + --bs-btn-hover-border-color: #ed6a5a; + --bs-btn-focus-shadow-rgb: 237, 106, 90; + --bs-btn-active-color: #000; + --bs-btn-active-bg: #ed6a5a; + --bs-btn-active-border-color: #ed6a5a; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #ed6a5a; + --bs-btn-disabled-bg: transparent; + --bs-btn-disabled-border-color: #ed6a5a; + --bs-gradient: none; } + +.btn-outline-danger { + --bs-btn-color: #fe4954; + --bs-btn-border-color: #fe4954; + --bs-btn-hover-color: #000; + --bs-btn-hover-bg: #fe4954; + --bs-btn-hover-border-color: #fe4954; + --bs-btn-focus-shadow-rgb: 254, 73, 84; + --bs-btn-active-color: #000; + --bs-btn-active-bg: #fe4954; + --bs-btn-active-border-color: #fe4954; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #fe4954; + --bs-btn-disabled-bg: transparent; + --bs-btn-disabled-border-color: #fe4954; + --bs-gradient: none; } + +.btn-outline-light { + --bs-btn-color: #d3f3ee; + --bs-btn-border-color: #d3f3ee; + --bs-btn-hover-color: #000; + --bs-btn-hover-bg: #d3f3ee; + --bs-btn-hover-border-color: #d3f3ee; + --bs-btn-focus-shadow-rgb: 211, 243, 238; + --bs-btn-active-color: #000; + --bs-btn-active-bg: #d3f3ee; + --bs-btn-active-border-color: #d3f3ee; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #d3f3ee; + --bs-btn-disabled-bg: transparent; + --bs-btn-disabled-border-color: #d3f3ee; + --bs-gradient: none; } + +.btn-outline-dark { + --bs-btn-color: #403f4c; + --bs-btn-border-color: #403f4c; + --bs-btn-hover-color: #fff; + --bs-btn-hover-bg: #403f4c; + --bs-btn-hover-border-color: #403f4c; + --bs-btn-focus-shadow-rgb: 64, 63, 76; + --bs-btn-active-color: #fff; + --bs-btn-active-bg: 
#403f4c; + --bs-btn-active-border-color: #403f4c; + --bs-btn-active-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); + --bs-btn-disabled-color: #403f4c; + --bs-btn-disabled-bg: transparent; + --bs-btn-disabled-border-color: #403f4c; + --bs-gradient: none; } + +.btn-link { + --bs-btn-font-weight: 400; + --bs-btn-color: var(--bs-link-color); + --bs-btn-bg: transparent; + --bs-btn-border-color: transparent; + --bs-btn-hover-color: var(--bs-link-hover-color); + --bs-btn-hover-border-color: transparent; + --bs-btn-active-color: var(--bs-link-hover-color); + --bs-btn-active-border-color: transparent; + --bs-btn-disabled-color: #6c757d; + --bs-btn-disabled-border-color: transparent; + --bs-btn-box-shadow: 0 0 0 #000; + --bs-btn-focus-shadow-rgb: 49, 132, 253; + text-decoration: underline; + background-image: none; } + .btn-link:focus-visible { + color: var(--bs-btn-color); } + .btn-link:hover { + color: var(--bs-btn-hover-color); } + +.btn-lg, .td-blog .td-rss-button, .btn-group-lg > .btn, div.drawio .btn-group-lg > button { + --bs-btn-padding-y: 0.5rem; + --bs-btn-padding-x: 1rem; + --bs-btn-font-size: 1.25rem; + --bs-btn-border-radius: var(--bs-border-radius-lg); } + +.btn-sm, .btn-group-sm > .btn, div.drawio .btn-group-sm > button, .td-blog .btn-group-sm > .td-rss-button { + --bs-btn-padding-y: 0.25rem; + --bs-btn-padding-x: 0.5rem; + --bs-btn-font-size: 0.875rem; + --bs-btn-border-radius: var(--bs-border-radius-sm); } + +.fade { + transition: opacity 0.15s linear; } + @media (prefers-reduced-motion: reduce) { + .fade { + transition: none; } } + .fade:not(.show) { + opacity: 0; } + +.collapse:not(.show) { + display: none; } + +.collapsing { + height: 0; + overflow: hidden; + transition: height 0.35s ease; } + @media (prefers-reduced-motion: reduce) { + .collapsing { + transition: none; } } + .collapsing.collapse-horizontal { + width: 0; + height: auto; + transition: width 0.35s ease; } + @media (prefers-reduced-motion: reduce) { + .collapsing.collapse-horizontal { + 
transition: none; } } +.dropup, +.dropend, +.dropdown, +.dropstart, +.dropup-center, +.dropdown-center { + position: relative; } + +.dropdown-toggle { + white-space: nowrap; } + .dropdown-toggle::after { + display: inline-block; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid; + border-right: 0.3em solid transparent; + border-bottom: 0; + border-left: 0.3em solid transparent; } + .dropdown-toggle:empty::after { + margin-left: 0; } + +.dropdown-menu { + --bs-dropdown-zindex: 1000; + --bs-dropdown-min-width: 10rem; + --bs-dropdown-padding-x: 0; + --bs-dropdown-padding-y: 0.5rem; + --bs-dropdown-spacer: 0.125rem; + --bs-dropdown-font-size: 1rem; + --bs-dropdown-color: var(--bs-body-color); + --bs-dropdown-bg: var(--bs-body-bg); + --bs-dropdown-border-color: var(--bs-border-color-translucent); + --bs-dropdown-border-radius: var(--bs-border-radius); + --bs-dropdown-border-width: var(--bs-border-width); + --bs-dropdown-inner-border-radius: calc(var(--bs-border-radius) - var(--bs-border-width)); + --bs-dropdown-divider-bg: var(--bs-border-color-translucent); + --bs-dropdown-divider-margin-y: 0.5rem; + --bs-dropdown-box-shadow: var(--bs-box-shadow); + --bs-dropdown-link-color: var(--bs-body-color); + --bs-dropdown-link-hover-color: var(--bs-body-color); + --bs-dropdown-link-hover-bg: var(--bs-tertiary-bg); + --bs-dropdown-link-active-color: #fff; + --bs-dropdown-link-active-bg: #189DD0; + --bs-dropdown-link-disabled-color: var(--bs-tertiary-color); + --bs-dropdown-item-padding-x: 1rem; + --bs-dropdown-item-padding-y: 0.25rem; + --bs-dropdown-header-color: #6c757d; + --bs-dropdown-header-padding-x: 1rem; + --bs-dropdown-header-padding-y: 0.5rem; + position: absolute; + z-index: var(--bs-dropdown-zindex); + display: none; + min-width: var(--bs-dropdown-min-width); + padding: var(--bs-dropdown-padding-y) var(--bs-dropdown-padding-x); + margin: 0; + font-size: var(--bs-dropdown-font-size); + color: var(--bs-dropdown-color); + 
text-align: left; + list-style: none; + background-color: var(--bs-dropdown-bg); + background-clip: padding-box; + border: var(--bs-dropdown-border-width) solid var(--bs-dropdown-border-color); + border-radius: var(--bs-dropdown-border-radius); + box-shadow: var(--bs-dropdown-box-shadow); } + .dropdown-menu[data-bs-popper] { + top: 100%; + left: 0; + margin-top: var(--bs-dropdown-spacer); } + +.dropdown-menu-start { + --bs-position: start; } + .dropdown-menu-start[data-bs-popper] { + right: auto; + left: 0; } + +.dropdown-menu-end { + --bs-position: end; } + .dropdown-menu-end[data-bs-popper] { + right: 0; + left: auto; } + +@media (min-width: 576px) { + .dropdown-menu-sm-start { + --bs-position: start; } + .dropdown-menu-sm-start[data-bs-popper] { + right: auto; + left: 0; } + .dropdown-menu-sm-end { + --bs-position: end; } + .dropdown-menu-sm-end[data-bs-popper] { + right: 0; + left: auto; } } + +@media (min-width: 768px) { + .dropdown-menu-md-start { + --bs-position: start; } + .dropdown-menu-md-start[data-bs-popper] { + right: auto; + left: 0; } + .dropdown-menu-md-end { + --bs-position: end; } + .dropdown-menu-md-end[data-bs-popper] { + right: 0; + left: auto; } } + +@media (min-width: 992px) { + .dropdown-menu-lg-start { + --bs-position: start; } + .dropdown-menu-lg-start[data-bs-popper] { + right: auto; + left: 0; } + .dropdown-menu-lg-end { + --bs-position: end; } + .dropdown-menu-lg-end[data-bs-popper] { + right: 0; + left: auto; } } + +@media (min-width: 1200px) { + .dropdown-menu-xl-start { + --bs-position: start; } + .dropdown-menu-xl-start[data-bs-popper] { + right: auto; + left: 0; } + .dropdown-menu-xl-end { + --bs-position: end; } + .dropdown-menu-xl-end[data-bs-popper] { + right: 0; + left: auto; } } + +@media (min-width: 1400px) { + .dropdown-menu-xxl-start { + --bs-position: start; } + .dropdown-menu-xxl-start[data-bs-popper] { + right: auto; + left: 0; } + .dropdown-menu-xxl-end { + --bs-position: end; } + .dropdown-menu-xxl-end[data-bs-popper] 
{ + right: 0; + left: auto; } } + +.dropup .dropdown-menu[data-bs-popper] { + top: auto; + bottom: 100%; + margin-top: 0; + margin-bottom: var(--bs-dropdown-spacer); } + +.dropup .dropdown-toggle::after { + display: inline-block; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0; + border-right: 0.3em solid transparent; + border-bottom: 0.3em solid; + border-left: 0.3em solid transparent; } + +.dropup .dropdown-toggle:empty::after { + margin-left: 0; } + +.dropend .dropdown-menu[data-bs-popper] { + top: 0; + right: auto; + left: 100%; + margin-top: 0; + margin-left: var(--bs-dropdown-spacer); } + +.dropend .dropdown-toggle::after { + display: inline-block; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid transparent; + border-right: 0; + border-bottom: 0.3em solid transparent; + border-left: 0.3em solid; } + +.dropend .dropdown-toggle:empty::after { + margin-left: 0; } + +.dropend .dropdown-toggle::after { + vertical-align: 0; } + +.dropstart .dropdown-menu[data-bs-popper] { + top: 0; + right: 100%; + left: auto; + margin-top: 0; + margin-right: var(--bs-dropdown-spacer); } + +.dropstart .dropdown-toggle::after { + display: inline-block; + margin-left: 0.255em; + vertical-align: 0.255em; + content: ""; } + +.dropstart .dropdown-toggle::after { + display: none; } + +.dropstart .dropdown-toggle::before { + display: inline-block; + margin-right: 0.255em; + vertical-align: 0.255em; + content: ""; + border-top: 0.3em solid transparent; + border-right: 0.3em solid; + border-bottom: 0.3em solid transparent; } + +.dropstart .dropdown-toggle:empty::after { + margin-left: 0; } + +.dropstart .dropdown-toggle::before { + vertical-align: 0; } + +.dropdown-divider { + height: 0; + margin: var(--bs-dropdown-divider-margin-y) 0; + overflow: hidden; + border-top: 1px solid var(--bs-dropdown-divider-bg); + opacity: 1; } + +.dropdown-item { + display: block; + width: 100%; + padding: 
var(--bs-dropdown-item-padding-y) var(--bs-dropdown-item-padding-x); + clear: both; + font-weight: 400; + color: var(--bs-dropdown-link-color); + text-align: inherit; + text-decoration: none; + white-space: nowrap; + background-color: transparent; + border: 0; + border-radius: var(--bs-dropdown-item-border-radius, 0); } + .dropdown-item:hover, .dropdown-item:focus { + color: var(--bs-dropdown-link-hover-color); + background-color: var(--bs-dropdown-link-hover-bg); + background-image: var(--bs-gradient); } + .dropdown-item.active, .dropdown-item:active { + color: var(--bs-dropdown-link-active-color); + text-decoration: none; + background-color: var(--bs-dropdown-link-active-bg); + background-image: var(--bs-gradient); } + .dropdown-item.disabled, .dropdown-item:disabled { + color: var(--bs-dropdown-link-disabled-color); + pointer-events: none; + background-color: transparent; + background-image: none; } + +.dropdown-menu.show { + display: block; } + +.dropdown-header { + display: block; + padding: var(--bs-dropdown-header-padding-y) var(--bs-dropdown-header-padding-x); + margin-bottom: 0; + font-size: 0.875rem; + color: var(--bs-dropdown-header-color); + white-space: nowrap; } + +.dropdown-item-text { + display: block; + padding: var(--bs-dropdown-item-padding-y) var(--bs-dropdown-item-padding-x); + color: var(--bs-dropdown-link-color); } + +.dropdown-menu-dark { + --bs-dropdown-color: #dee2e6; + --bs-dropdown-bg: #343a40; + --bs-dropdown-border-color: var(--bs-border-color-translucent); + --bs-dropdown-box-shadow: ; + --bs-dropdown-link-color: #dee2e6; + --bs-dropdown-link-hover-color: #fff; + --bs-dropdown-divider-bg: var(--bs-border-color-translucent); + --bs-dropdown-link-hover-bg: rgba(255, 255, 255, 0.15); + --bs-dropdown-link-active-color: #fff; + --bs-dropdown-link-active-bg: #189DD0; + --bs-dropdown-link-disabled-color: #adb5bd; + --bs-dropdown-header-color: #adb5bd; } + +.btn-group, +.btn-group-vertical { + position: relative; + display: inline-flex; + 
vertical-align: middle; } + .btn-group > .btn, div.drawio .btn-group > button, .td-blog .btn-group > .td-rss-button, + .btn-group-vertical > .btn, + div.drawio .btn-group-vertical > button, + .td-blog .btn-group-vertical > .td-rss-button { + position: relative; + flex: 1 1 auto; } + .btn-group > .btn-check:checked + .btn, div.drawio .btn-group > .btn-check:checked + button, .td-blog .btn-group > .btn-check:checked + .td-rss-button, + .btn-group > .btn-check:focus + .btn, + div.drawio .btn-group > .btn-check:focus + button, + .td-blog .btn-group > .btn-check:focus + .td-rss-button, + .btn-group > .btn:hover, + div.drawio .btn-group > button:hover, + .td-blog .btn-group > .td-rss-button:hover, + .btn-group > .btn:focus, + div.drawio .btn-group > button:focus, + .td-blog .btn-group > .td-rss-button:focus, + .btn-group > .btn:active, + div.drawio .btn-group > button:active, + .td-blog .btn-group > .td-rss-button:active, + .btn-group > .btn.active, + div.drawio .btn-group > button.active, + .td-blog .btn-group > .active.td-rss-button, + .btn-group-vertical > .btn-check:checked + .btn, + div.drawio .btn-group-vertical > .btn-check:checked + button, + .td-blog .btn-group-vertical > .btn-check:checked + .td-rss-button, + .btn-group-vertical > .btn-check:focus + .btn, + div.drawio .btn-group-vertical > .btn-check:focus + button, + .td-blog .btn-group-vertical > .btn-check:focus + .td-rss-button, + .btn-group-vertical > .btn:hover, + div.drawio .btn-group-vertical > button:hover, + .td-blog .btn-group-vertical > .td-rss-button:hover, + .btn-group-vertical > .btn:focus, + div.drawio .btn-group-vertical > button:focus, + .td-blog .btn-group-vertical > .td-rss-button:focus, + .btn-group-vertical > .btn:active, + div.drawio .btn-group-vertical > button:active, + .td-blog .btn-group-vertical > .td-rss-button:active, + .btn-group-vertical > .btn.active, + div.drawio .btn-group-vertical > button.active, + .td-blog .btn-group-vertical > .active.td-rss-button { + z-index: 1; } + 
+.btn-toolbar { + display: flex; + flex-wrap: wrap; + justify-content: flex-start; } + .btn-toolbar .input-group { + width: auto; } + +.btn-group { + border-radius: var(--bs-border-radius); } + .btn-group > :not(.btn-check:first-child) + .btn, div.drawio .btn-group > :not(.btn-check:first-child) + button, .td-blog .btn-group > :not(.btn-check:first-child) + .td-rss-button, + .btn-group > .btn-group:not(:first-child) { + margin-left: calc(var(--bs-border-width) * -1); } + .btn-group > .btn:not(:last-child):not(.dropdown-toggle), div.drawio .btn-group > button:not(:last-child):not(.dropdown-toggle), .td-blog .btn-group > .td-rss-button:not(:last-child):not(.dropdown-toggle), + .btn-group > .btn.dropdown-toggle-split:first-child, + div.drawio .btn-group > button.dropdown-toggle-split:first-child, + .td-blog .btn-group > .dropdown-toggle-split.td-rss-button:first-child, + .btn-group > .btn-group:not(:last-child) > .btn, + div.drawio .btn-group > .btn-group:not(:last-child) > button, + .td-blog .btn-group > .btn-group:not(:last-child) > .td-rss-button { + border-top-right-radius: 0; + border-bottom-right-radius: 0; } + .btn-group > .btn:nth-child(n + 3), div.drawio .btn-group > button:nth-child(n + 3), .td-blog .btn-group > .td-rss-button:nth-child(n + 3), + .btn-group > :not(.btn-check) + .btn, + div.drawio .btn-group > :not(.btn-check) + button, + .td-blog .btn-group > :not(.btn-check) + .td-rss-button, + .btn-group > .btn-group:not(:first-child) > .btn, + div.drawio .btn-group > .btn-group:not(:first-child) > button, + .td-blog .btn-group > .btn-group:not(:first-child) > .td-rss-button { + border-top-left-radius: 0; + border-bottom-left-radius: 0; } + +.dropdown-toggle-split { + padding-right: 0.5625rem; + padding-left: 0.5625rem; } + .dropdown-toggle-split::after, .dropup .dropdown-toggle-split::after, .dropend .dropdown-toggle-split::after { + margin-left: 0; } + .dropstart .dropdown-toggle-split::before { + margin-right: 0; } + +.btn-sm + .dropdown-toggle-split, 
.btn-group-sm > .btn + .dropdown-toggle-split, div.drawio .btn-group-sm > button + .dropdown-toggle-split, .td-blog .btn-group-sm > .td-rss-button + .dropdown-toggle-split { + padding-right: 0.375rem; + padding-left: 0.375rem; } + +.btn-lg + .dropdown-toggle-split, .td-blog .td-rss-button + .dropdown-toggle-split, .btn-group-lg > .btn + .dropdown-toggle-split, div.drawio .btn-group-lg > button + .dropdown-toggle-split, .td-blog .btn-group-lg > .td-rss-button + .dropdown-toggle-split { + padding-right: 0.75rem; + padding-left: 0.75rem; } + +.btn-group.show .dropdown-toggle { + box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); } + .btn-group.show .dropdown-toggle.btn-link { + box-shadow: none; } + +.btn-group-vertical { + flex-direction: column; + align-items: flex-start; + justify-content: center; } + .btn-group-vertical > .btn, div.drawio .btn-group-vertical > button, .td-blog .btn-group-vertical > .td-rss-button, + .btn-group-vertical > .btn-group { + width: 100%; } + .btn-group-vertical > .btn:not(:first-child), div.drawio .btn-group-vertical > button:not(:first-child), .td-blog .btn-group-vertical > .td-rss-button:not(:first-child), + .btn-group-vertical > .btn-group:not(:first-child) { + margin-top: calc(var(--bs-border-width) * -1); } + .btn-group-vertical > .btn:not(:last-child):not(.dropdown-toggle), div.drawio .btn-group-vertical > button:not(:last-child):not(.dropdown-toggle), .td-blog .btn-group-vertical > .td-rss-button:not(:last-child):not(.dropdown-toggle), + .btn-group-vertical > .btn-group:not(:last-child) > .btn, + div.drawio .btn-group-vertical > .btn-group:not(:last-child) > button, + .td-blog .btn-group-vertical > .btn-group:not(:last-child) > .td-rss-button { + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; } + .btn-group-vertical > .btn ~ .btn, div.drawio .btn-group-vertical > button ~ .btn, div.drawio .btn-group-vertical > .btn ~ button, div.drawio .btn-group-vertical > button ~ button, .td-blog .btn-group-vertical > 
.td-rss-button ~ .btn, .td-blog div.drawio .btn-group-vertical > .td-rss-button ~ button, div.drawio .td-blog .btn-group-vertical > .td-rss-button ~ button, .td-blog .btn-group-vertical > .btn ~ .td-rss-button, .td-blog div.drawio .btn-group-vertical > button ~ .td-rss-button, div.drawio .td-blog .btn-group-vertical > button ~ .td-rss-button, .td-blog .btn-group-vertical > .td-rss-button ~ .td-rss-button, + .btn-group-vertical > .btn-group:not(:first-child) > .btn, + div.drawio .btn-group-vertical > .btn-group:not(:first-child) > button, + .td-blog .btn-group-vertical > .btn-group:not(:first-child) > .td-rss-button { + border-top-left-radius: 0; + border-top-right-radius: 0; } + +.nav { + --bs-nav-link-padding-x: 1rem; + --bs-nav-link-padding-y: 0.5rem; + --bs-nav-link-font-weight: ; + --bs-nav-link-color: var(--bs-link-color); + --bs-nav-link-hover-color: var(--bs-link-hover-color); + --bs-nav-link-disabled-color: var(--bs-secondary-color); + display: flex; + flex-wrap: wrap; + padding-left: 0; + margin-bottom: 0; + list-style: none; } + +.nav-link { + display: block; + padding: var(--bs-nav-link-padding-y) var(--bs-nav-link-padding-x); + font-size: var(--bs-nav-link-font-size); + font-weight: var(--bs-nav-link-font-weight); + color: var(--bs-nav-link-color); + text-decoration: none; + background: none; + border: 0; + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out; } + @media (prefers-reduced-motion: reduce) { + .nav-link { + transition: none; } } + .nav-link:hover, .nav-link:focus { + color: var(--bs-nav-link-hover-color); } + .nav-link:focus-visible { + outline: 0; + box-shadow: 0 0 0 0.25rem rgba(24, 157, 208, 0.25); } + .nav-link.disabled, .nav-link:disabled { + color: var(--bs-nav-link-disabled-color); + pointer-events: none; + cursor: default; } + +.nav-tabs { + --bs-nav-tabs-border-width: var(--bs-border-width); + --bs-nav-tabs-border-color: var(--bs-border-color); + --bs-nav-tabs-border-radius: 
var(--bs-border-radius); + --bs-nav-tabs-link-hover-border-color: var(--bs-secondary-bg) var(--bs-secondary-bg) var(--bs-border-color); + --bs-nav-tabs-link-active-color: var(--bs-emphasis-color); + --bs-nav-tabs-link-active-bg: var(--bs-body-bg); + --bs-nav-tabs-link-active-border-color: var(--bs-border-color) var(--bs-border-color) var(--bs-body-bg); + border-bottom: var(--bs-nav-tabs-border-width) solid var(--bs-nav-tabs-border-color); } + .nav-tabs .nav-link { + margin-bottom: calc(-1 * var(--bs-nav-tabs-border-width)); + border: var(--bs-nav-tabs-border-width) solid transparent; + border-top-left-radius: var(--bs-nav-tabs-border-radius); + border-top-right-radius: var(--bs-nav-tabs-border-radius); } + .nav-tabs .nav-link:hover, .nav-tabs .nav-link:focus { + isolation: isolate; + border-color: var(--bs-nav-tabs-link-hover-border-color); } + .nav-tabs .nav-link.active, + .nav-tabs .nav-item.show .nav-link { + color: var(--bs-nav-tabs-link-active-color); + background-color: var(--bs-nav-tabs-link-active-bg); + border-color: var(--bs-nav-tabs-link-active-border-color); } + .nav-tabs .dropdown-menu { + margin-top: calc(-1 * var(--bs-nav-tabs-border-width)); + border-top-left-radius: 0; + border-top-right-radius: 0; } + +.nav-pills { + --bs-nav-pills-border-radius: var(--bs-border-radius); + --bs-nav-pills-link-active-color: #fff; + --bs-nav-pills-link-active-bg: #189DD0; } + .nav-pills .nav-link { + border-radius: var(--bs-nav-pills-border-radius); } + .nav-pills .nav-link.active, + .nav-pills .show > .nav-link { + color: var(--bs-nav-pills-link-active-color); + background-color: var(--bs-nav-pills-link-active-bg); + background-image: var(--bs-gradient); } + +.nav-underline { + --bs-nav-underline-gap: 1rem; + --bs-nav-underline-border-width: 0.125rem; + --bs-nav-underline-link-active-color: var(--bs-emphasis-color); + gap: var(--bs-nav-underline-gap); } + .nav-underline .nav-link { + padding-right: 0; + padding-left: 0; + border-bottom: 
var(--bs-nav-underline-border-width) solid transparent; } + .nav-underline .nav-link:hover, .nav-underline .nav-link:focus { + border-bottom-color: currentcolor; } + .nav-underline .nav-link.active, + .nav-underline .show > .nav-link { + font-weight: 700; + color: var(--bs-nav-underline-link-active-color); + border-bottom-color: currentcolor; } + +.nav-fill > .nav-link, +.nav-fill .nav-item { + flex: 1 1 auto; + text-align: center; } + +.nav-justified > .nav-link, +.nav-justified .nav-item { + flex-basis: 0; + flex-grow: 1; + text-align: center; } + +.nav-fill .nav-item .nav-link, +.nav-justified .nav-item .nav-link { + width: 100%; } + +.tab-content > .tab-pane { + display: none; } + +.tab-content > .active { + display: block; } + +.navbar, .td-navbar { + --bs-navbar-padding-x: 0; + --bs-navbar-padding-y: 0.5rem; + --bs-navbar-color: rgba(var(--bs-emphasis-color-rgb), 0.65); + --bs-navbar-hover-color: rgba(var(--bs-emphasis-color-rgb), 0.8); + --bs-navbar-disabled-color: rgba(var(--bs-emphasis-color-rgb), 0.3); + --bs-navbar-active-color: rgba(var(--bs-emphasis-color-rgb), 1); + --bs-navbar-brand-padding-y: 0.3125rem; + --bs-navbar-brand-margin-end: 1rem; + --bs-navbar-brand-font-size: 1.25rem; + --bs-navbar-brand-color: rgba(var(--bs-emphasis-color-rgb), 1); + --bs-navbar-brand-hover-color: rgba(var(--bs-emphasis-color-rgb), 1); + --bs-navbar-nav-link-padding-x: 0.5rem; + --bs-navbar-toggler-padding-y: 0.25rem; + --bs-navbar-toggler-padding-x: 0.75rem; + --bs-navbar-toggler-font-size: 1.25rem; + --bs-navbar-toggler-icon-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%2833, 37, 41, 0.75%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e"); + --bs-navbar-toggler-border-color: rgba(var(--bs-emphasis-color-rgb), 0.15); + --bs-navbar-toggler-border-radius: var(--bs-border-radius); + --bs-navbar-toggler-focus-width: 0.25rem; + 
--bs-navbar-toggler-transition: box-shadow 0.15s ease-in-out; + position: relative; + display: flex; + flex-wrap: wrap; + align-items: center; + justify-content: space-between; + padding: var(--bs-navbar-padding-y) var(--bs-navbar-padding-x); + background-image: var(--bs-gradient); } + .navbar > .container, .td-navbar > .container, + .navbar > .container-fluid, + .td-navbar > .container-fluid, + .navbar > .container-sm, + .td-navbar > .container-sm, + .navbar > .container-md, + .td-navbar > .container-md, + .navbar > .container-lg, + .td-navbar > .container-lg, + .navbar > .container-xl, + .td-navbar > .container-xl, + .navbar > .container-xxl, + .td-navbar > .container-xxl { + display: flex; + flex-wrap: inherit; + align-items: center; + justify-content: space-between; } + +.navbar-brand { + padding-top: var(--bs-navbar-brand-padding-y); + padding-bottom: var(--bs-navbar-brand-padding-y); + margin-right: var(--bs-navbar-brand-margin-end); + font-size: var(--bs-navbar-brand-font-size); + color: var(--bs-navbar-brand-color); + text-decoration: none; + white-space: nowrap; } + .navbar-brand:hover, .navbar-brand:focus { + color: var(--bs-navbar-brand-hover-color); } + +.navbar-nav { + --bs-nav-link-padding-x: 0; + --bs-nav-link-padding-y: 0.5rem; + --bs-nav-link-font-weight: ; + --bs-nav-link-color: var(--bs-navbar-color); + --bs-nav-link-hover-color: var(--bs-navbar-hover-color); + --bs-nav-link-disabled-color: var(--bs-navbar-disabled-color); + display: flex; + flex-direction: column; + padding-left: 0; + margin-bottom: 0; + list-style: none; } + .navbar-nav .nav-link.active, .navbar-nav .nav-link.show { + color: var(--bs-navbar-active-color); } + .navbar-nav .dropdown-menu { + position: static; } + +.navbar-text { + padding-top: 0.5rem; + padding-bottom: 0.5rem; + color: var(--bs-navbar-color); } + .navbar-text a, + .navbar-text a:hover, + .navbar-text a:focus { + color: var(--bs-navbar-active-color); } + +.navbar-collapse { + flex-basis: 100%; + flex-grow: 1; + 
align-items: center; } + +.navbar-toggler { + padding: var(--bs-navbar-toggler-padding-y) var(--bs-navbar-toggler-padding-x); + font-size: var(--bs-navbar-toggler-font-size); + line-height: 1; + color: var(--bs-navbar-color); + background-color: transparent; + border: var(--bs-border-width) solid var(--bs-navbar-toggler-border-color); + border-radius: var(--bs-navbar-toggler-border-radius); + transition: var(--bs-navbar-toggler-transition); } + @media (prefers-reduced-motion: reduce) { + .navbar-toggler { + transition: none; } } + .navbar-toggler:hover { + text-decoration: none; } + .navbar-toggler:focus { + text-decoration: none; + outline: 0; + box-shadow: 0 0 0 var(--bs-navbar-toggler-focus-width); } + +.navbar-toggler-icon { + display: inline-block; + width: 1.5em; + height: 1.5em; + vertical-align: middle; + background-image: var(--bs-navbar-toggler-icon-bg); + background-repeat: no-repeat; + background-position: center; + background-size: 100%; } + +.navbar-nav-scroll { + max-height: var(--bs-scroll-height, 75vh); + overflow-y: auto; } + +@media (min-width: 576px) { + .navbar-expand-sm { + flex-wrap: nowrap; + justify-content: flex-start; } + .navbar-expand-sm .navbar-nav { + flex-direction: row; } + .navbar-expand-sm .navbar-nav .dropdown-menu { + position: absolute; } + .navbar-expand-sm .navbar-nav .nav-link { + padding-right: var(--bs-navbar-nav-link-padding-x); + padding-left: var(--bs-navbar-nav-link-padding-x); } + .navbar-expand-sm .navbar-nav-scroll { + overflow: visible; } + .navbar-expand-sm .navbar-collapse { + display: flex !important; + flex-basis: auto; } + .navbar-expand-sm .navbar-toggler { + display: none; } + .navbar-expand-sm .offcanvas { + position: static; + z-index: auto; + flex-grow: 1; + width: auto !important; + height: auto !important; + visibility: visible !important; + background-color: transparent !important; + border: 0 !important; + transform: none !important; + box-shadow: none; + transition: none; } + .navbar-expand-sm 
.offcanvas .offcanvas-header { + display: none; } + .navbar-expand-sm .offcanvas .offcanvas-body { + display: flex; + flex-grow: 0; + padding: 0; + overflow-y: visible; } } + +@media (min-width: 768px) { + .navbar-expand-md { + flex-wrap: nowrap; + justify-content: flex-start; } + .navbar-expand-md .navbar-nav { + flex-direction: row; } + .navbar-expand-md .navbar-nav .dropdown-menu { + position: absolute; } + .navbar-expand-md .navbar-nav .nav-link { + padding-right: var(--bs-navbar-nav-link-padding-x); + padding-left: var(--bs-navbar-nav-link-padding-x); } + .navbar-expand-md .navbar-nav-scroll { + overflow: visible; } + .navbar-expand-md .navbar-collapse { + display: flex !important; + flex-basis: auto; } + .navbar-expand-md .navbar-toggler { + display: none; } + .navbar-expand-md .offcanvas { + position: static; + z-index: auto; + flex-grow: 1; + width: auto !important; + height: auto !important; + visibility: visible !important; + background-color: transparent !important; + border: 0 !important; + transform: none !important; + box-shadow: none; + transition: none; } + .navbar-expand-md .offcanvas .offcanvas-header { + display: none; } + .navbar-expand-md .offcanvas .offcanvas-body { + display: flex; + flex-grow: 0; + padding: 0; + overflow-y: visible; } } + +@media (min-width: 992px) { + .navbar-expand-lg { + flex-wrap: nowrap; + justify-content: flex-start; } + .navbar-expand-lg .navbar-nav { + flex-direction: row; } + .navbar-expand-lg .navbar-nav .dropdown-menu { + position: absolute; } + .navbar-expand-lg .navbar-nav .nav-link { + padding-right: var(--bs-navbar-nav-link-padding-x); + padding-left: var(--bs-navbar-nav-link-padding-x); } + .navbar-expand-lg .navbar-nav-scroll { + overflow: visible; } + .navbar-expand-lg .navbar-collapse { + display: flex !important; + flex-basis: auto; } + .navbar-expand-lg .navbar-toggler { + display: none; } + .navbar-expand-lg .offcanvas { + position: static; + z-index: auto; + flex-grow: 1; + width: auto !important; + 
height: auto !important; + visibility: visible !important; + background-color: transparent !important; + border: 0 !important; + transform: none !important; + box-shadow: none; + transition: none; } + .navbar-expand-lg .offcanvas .offcanvas-header { + display: none; } + .navbar-expand-lg .offcanvas .offcanvas-body { + display: flex; + flex-grow: 0; + padding: 0; + overflow-y: visible; } } + +@media (min-width: 1200px) { + .navbar-expand-xl { + flex-wrap: nowrap; + justify-content: flex-start; } + .navbar-expand-xl .navbar-nav { + flex-direction: row; } + .navbar-expand-xl .navbar-nav .dropdown-menu { + position: absolute; } + .navbar-expand-xl .navbar-nav .nav-link { + padding-right: var(--bs-navbar-nav-link-padding-x); + padding-left: var(--bs-navbar-nav-link-padding-x); } + .navbar-expand-xl .navbar-nav-scroll { + overflow: visible; } + .navbar-expand-xl .navbar-collapse { + display: flex !important; + flex-basis: auto; } + .navbar-expand-xl .navbar-toggler { + display: none; } + .navbar-expand-xl .offcanvas { + position: static; + z-index: auto; + flex-grow: 1; + width: auto !important; + height: auto !important; + visibility: visible !important; + background-color: transparent !important; + border: 0 !important; + transform: none !important; + box-shadow: none; + transition: none; } + .navbar-expand-xl .offcanvas .offcanvas-header { + display: none; } + .navbar-expand-xl .offcanvas .offcanvas-body { + display: flex; + flex-grow: 0; + padding: 0; + overflow-y: visible; } } + +@media (min-width: 1400px) { + .navbar-expand-xxl { + flex-wrap: nowrap; + justify-content: flex-start; } + .navbar-expand-xxl .navbar-nav { + flex-direction: row; } + .navbar-expand-xxl .navbar-nav .dropdown-menu { + position: absolute; } + .navbar-expand-xxl .navbar-nav .nav-link { + padding-right: var(--bs-navbar-nav-link-padding-x); + padding-left: var(--bs-navbar-nav-link-padding-x); } + .navbar-expand-xxl .navbar-nav-scroll { + overflow: visible; } + .navbar-expand-xxl 
.navbar-collapse { + display: flex !important; + flex-basis: auto; } + .navbar-expand-xxl .navbar-toggler { + display: none; } + .navbar-expand-xxl .offcanvas { + position: static; + z-index: auto; + flex-grow: 1; + width: auto !important; + height: auto !important; + visibility: visible !important; + background-color: transparent !important; + border: 0 !important; + transform: none !important; + box-shadow: none; + transition: none; } + .navbar-expand-xxl .offcanvas .offcanvas-header { + display: none; } + .navbar-expand-xxl .offcanvas .offcanvas-body { + display: flex; + flex-grow: 0; + padding: 0; + overflow-y: visible; } } + +.navbar-expand, .td-navbar { + flex-wrap: nowrap; + justify-content: flex-start; } + .navbar-expand .navbar-nav, .td-navbar .navbar-nav { + flex-direction: row; } + .navbar-expand .navbar-nav .dropdown-menu, .td-navbar .navbar-nav .dropdown-menu { + position: absolute; } + .navbar-expand .navbar-nav .nav-link, .td-navbar .navbar-nav .nav-link { + padding-right: var(--bs-navbar-nav-link-padding-x); + padding-left: var(--bs-navbar-nav-link-padding-x); } + .navbar-expand .navbar-nav-scroll, .td-navbar .navbar-nav-scroll { + overflow: visible; } + .navbar-expand .navbar-collapse, .td-navbar .navbar-collapse { + display: flex !important; + flex-basis: auto; } + .navbar-expand .navbar-toggler, .td-navbar .navbar-toggler { + display: none; } + .navbar-expand .offcanvas, .td-navbar .offcanvas { + position: static; + z-index: auto; + flex-grow: 1; + width: auto !important; + height: auto !important; + visibility: visible !important; + background-color: transparent !important; + border: 0 !important; + transform: none !important; + box-shadow: none; + transition: none; } + .navbar-expand .offcanvas .offcanvas-header, .td-navbar .offcanvas .offcanvas-header { + display: none; } + .navbar-expand .offcanvas .offcanvas-body, .td-navbar .offcanvas .offcanvas-body { + display: flex; + flex-grow: 0; + padding: 0; + overflow-y: visible; } + +.navbar-dark, 
+.navbar[data-bs-theme="dark"], +[data-bs-theme="dark"].td-navbar { + --bs-navbar-color: rgba(255, 255, 255, 0.55); + --bs-navbar-hover-color: rgba(255, 255, 255, 0.75); + --bs-navbar-disabled-color: rgba(255, 255, 255, 0.25); + --bs-navbar-active-color: #fff; + --bs-navbar-brand-color: #fff; + --bs-navbar-brand-hover-color: #fff; + --bs-navbar-toggler-border-color: rgba(255, 255, 255, 0.1); + --bs-navbar-toggler-icon-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%28255, 255, 255, 0.55%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e"); } + +[data-bs-theme="dark"] .navbar-toggler-icon { + --bs-navbar-toggler-icon-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 30 30'%3e%3cpath stroke='rgba%28255, 255, 255, 0.55%29' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e"); } + +.card { + --bs-card-spacer-y: 1rem; + --bs-card-spacer-x: 1rem; + --bs-card-title-spacer-y: 0.5rem; + --bs-card-title-color: ; + --bs-card-subtitle-color: ; + --bs-card-border-width: var(--bs-border-width); + --bs-card-border-color: var(--bs-border-color-translucent); + --bs-card-border-radius: var(--bs-border-radius); + --bs-card-box-shadow: ; + --bs-card-inner-border-radius: calc(var(--bs-border-radius) - (var(--bs-border-width))); + --bs-card-cap-padding-y: 0.5rem; + --bs-card-cap-padding-x: 1rem; + --bs-card-cap-bg: rgba(var(--bs-body-color-rgb), 0.03); + --bs-card-cap-color: ; + --bs-card-height: ; + --bs-card-color: ; + --bs-card-bg: var(--bs-body-bg); + --bs-card-img-overlay-padding: 1rem; + --bs-card-group-margin: 0.75rem; + position: relative; + display: flex; + flex-direction: column; + min-width: 0; + height: var(--bs-card-height); + color: var(--bs-body-color); + word-wrap: break-word; + background-color: var(--bs-card-bg); + background-clip: border-box; + border: 
var(--bs-card-border-width) solid var(--bs-card-border-color); + border-radius: var(--bs-card-border-radius); + box-shadow: var(--bs-card-box-shadow); } + .card > hr { + margin-right: 0; + margin-left: 0; } + .card > .list-group { + border-top: inherit; + border-bottom: inherit; } + .card > .list-group:first-child { + border-top-width: 0; + border-top-left-radius: var(--bs-card-inner-border-radius); + border-top-right-radius: var(--bs-card-inner-border-radius); } + .card > .list-group:last-child { + border-bottom-width: 0; + border-bottom-right-radius: var(--bs-card-inner-border-radius); + border-bottom-left-radius: var(--bs-card-inner-border-radius); } + .card > .card-header + .list-group, + .card > .list-group + .card-footer { + border-top: 0; } + +.card-body { + flex: 1 1 auto; + padding: var(--bs-card-spacer-y) var(--bs-card-spacer-x); + color: var(--bs-card-color); } + +.card-title { + margin-bottom: var(--bs-card-title-spacer-y); + color: var(--bs-card-title-color); } + +.card-subtitle { + margin-top: calc(-.5 * var(--bs-card-title-spacer-y)); + margin-bottom: 0; + color: var(--bs-card-subtitle-color); } + +.card-text:last-child { + margin-bottom: 0; } + +.card-link + .card-link { + margin-left: var(--bs-card-spacer-x); } + +.card-header { + padding: var(--bs-card-cap-padding-y) var(--bs-card-cap-padding-x); + margin-bottom: 0; + color: var(--bs-card-cap-color); + background-color: var(--bs-card-cap-bg); + border-bottom: var(--bs-card-border-width) solid var(--bs-card-border-color); } + .card-header:first-child { + border-radius: var(--bs-card-inner-border-radius) var(--bs-card-inner-border-radius) 0 0; } + +.card-footer { + padding: var(--bs-card-cap-padding-y) var(--bs-card-cap-padding-x); + color: var(--bs-card-cap-color); + background-color: var(--bs-card-cap-bg); + border-top: var(--bs-card-border-width) solid var(--bs-card-border-color); } + .card-footer:last-child { + border-radius: 0 0 var(--bs-card-inner-border-radius) 
var(--bs-card-inner-border-radius); } + +.card-header-tabs { + margin-right: calc(-.5 * var(--bs-card-cap-padding-x)); + margin-bottom: calc(-1 * var(--bs-card-cap-padding-y)); + margin-left: calc(-.5 * var(--bs-card-cap-padding-x)); + border-bottom: 0; } + .card-header-tabs .nav-link.active { + background-color: var(--bs-card-bg); + border-bottom-color: var(--bs-card-bg); } + +.card-header-pills { + margin-right: calc(-.5 * var(--bs-card-cap-padding-x)); + margin-left: calc(-.5 * var(--bs-card-cap-padding-x)); } + +.card-img-overlay { + position: absolute; + top: 0; + right: 0; + bottom: 0; + left: 0; + padding: var(--bs-card-img-overlay-padding); + border-radius: var(--bs-card-inner-border-radius); } + +.card-img, +.card-img-top, +.card-img-bottom { + width: 100%; } + +.card-img, +.card-img-top { + border-top-left-radius: var(--bs-card-inner-border-radius); + border-top-right-radius: var(--bs-card-inner-border-radius); } + +.card-img, +.card-img-bottom { + border-bottom-right-radius: var(--bs-card-inner-border-radius); + border-bottom-left-radius: var(--bs-card-inner-border-radius); } + +.card-group > .card { + margin-bottom: var(--bs-card-group-margin); } + +@media (min-width: 576px) { + .card-group { + display: flex; + flex-flow: row wrap; } + .card-group > .card { + flex: 1 0 0%; + margin-bottom: 0; } + .card-group > .card + .card { + margin-left: 0; + border-left: 0; } + .card-group > .card:not(:last-child) { + border-top-right-radius: 0; + border-bottom-right-radius: 0; } + .card-group > .card:not(:last-child) .card-img-top, + .card-group > .card:not(:last-child) .card-header { + border-top-right-radius: 0; } + .card-group > .card:not(:last-child) .card-img-bottom, + .card-group > .card:not(:last-child) .card-footer { + border-bottom-right-radius: 0; } + .card-group > .card:not(:first-child) { + border-top-left-radius: 0; + border-bottom-left-radius: 0; } + .card-group > .card:not(:first-child) .card-img-top, + .card-group > .card:not(:first-child) 
.card-header { + border-top-left-radius: 0; } + .card-group > .card:not(:first-child) .card-img-bottom, + .card-group > .card:not(:first-child) .card-footer { + border-bottom-left-radius: 0; } } + +.accordion { + --bs-accordion-color: var(--bs-body-color); + --bs-accordion-bg: var(--bs-body-bg); + --bs-accordion-transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, border-radius 0.15s ease; + --bs-accordion-border-color: var(--bs-border-color); + --bs-accordion-border-width: var(--bs-border-width); + --bs-accordion-border-radius: var(--bs-border-radius); + --bs-accordion-inner-border-radius: calc(var(--bs-border-radius) - (var(--bs-border-width))); + --bs-accordion-btn-padding-x: 1.25rem; + --bs-accordion-btn-padding-y: 1rem; + --bs-accordion-btn-color: var(--bs-body-color); + --bs-accordion-btn-bg: var(--bs-accordion-bg); + --bs-accordion-btn-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='none' stroke='%23212529' stroke-linecap='round' stroke-linejoin='round'%3e%3cpath d='M2 5L8 11L14 5'/%3e%3c/svg%3e"); + --bs-accordion-btn-icon-width: 1.25rem; + --bs-accordion-btn-icon-transform: rotate(-180deg); + --bs-accordion-btn-icon-transition: transform 0.2s ease-in-out; + --bs-accordion-btn-active-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='none' stroke='%230a3f53' stroke-linecap='round' stroke-linejoin='round'%3e%3cpath d='M2 5L8 11L14 5'/%3e%3c/svg%3e"); + --bs-accordion-btn-focus-box-shadow: 0 0 0 0.25rem rgba(24, 157, 208, 0.25); + --bs-accordion-body-padding-x: 1.25rem; + --bs-accordion-body-padding-y: 1rem; + --bs-accordion-active-color: var(--bs-primary-text-emphasis); + --bs-accordion-active-bg: var(--bs-primary-bg-subtle); } + +.accordion-button { + position: relative; + display: flex; + align-items: center; + width: 100%; + padding: var(--bs-accordion-btn-padding-y) 
var(--bs-accordion-btn-padding-x); + font-size: 1rem; + color: var(--bs-accordion-btn-color); + text-align: left; + background-color: var(--bs-accordion-btn-bg); + border: 0; + border-radius: 0; + overflow-anchor: none; + transition: var(--bs-accordion-transition); } + @media (prefers-reduced-motion: reduce) { + .accordion-button { + transition: none; } } + .accordion-button:not(.collapsed) { + color: var(--bs-accordion-active-color); + background-color: var(--bs-accordion-active-bg); + box-shadow: inset 0 calc(-1 * var(--bs-accordion-border-width)) 0 var(--bs-accordion-border-color); } + .accordion-button:not(.collapsed)::after { + background-image: var(--bs-accordion-btn-active-icon); + transform: var(--bs-accordion-btn-icon-transform); } + .accordion-button::after { + flex-shrink: 0; + width: var(--bs-accordion-btn-icon-width); + height: var(--bs-accordion-btn-icon-width); + margin-left: auto; + content: ""; + background-image: var(--bs-accordion-btn-icon); + background-repeat: no-repeat; + background-size: var(--bs-accordion-btn-icon-width); + transition: var(--bs-accordion-btn-icon-transition); } + @media (prefers-reduced-motion: reduce) { + .accordion-button::after { + transition: none; } } + .accordion-button:hover { + z-index: 2; } + .accordion-button:focus { + z-index: 3; + outline: 0; + box-shadow: var(--bs-accordion-btn-focus-box-shadow); } + +.accordion-header { + margin-bottom: 0; } + +.accordion-item { + color: var(--bs-accordion-color); + background-color: var(--bs-accordion-bg); + border: var(--bs-accordion-border-width) solid var(--bs-accordion-border-color); } + .accordion-item:first-of-type { + border-top-left-radius: var(--bs-accordion-border-radius); + border-top-right-radius: var(--bs-accordion-border-radius); } + .accordion-item:first-of-type > .accordion-header .accordion-button { + border-top-left-radius: var(--bs-accordion-inner-border-radius); + border-top-right-radius: var(--bs-accordion-inner-border-radius); } + 
.accordion-item:not(:first-of-type) { + border-top: 0; } + .accordion-item:last-of-type { + border-bottom-right-radius: var(--bs-accordion-border-radius); + border-bottom-left-radius: var(--bs-accordion-border-radius); } + .accordion-item:last-of-type > .accordion-header .accordion-button.collapsed { + border-bottom-right-radius: var(--bs-accordion-inner-border-radius); + border-bottom-left-radius: var(--bs-accordion-inner-border-radius); } + .accordion-item:last-of-type > .accordion-collapse { + border-bottom-right-radius: var(--bs-accordion-border-radius); + border-bottom-left-radius: var(--bs-accordion-border-radius); } + +.accordion-body { + padding: var(--bs-accordion-body-padding-y) var(--bs-accordion-body-padding-x); } + +.accordion-flush > .accordion-item { + border-right: 0; + border-left: 0; + border-radius: 0; } + .accordion-flush > .accordion-item:first-child { + border-top: 0; } + .accordion-flush > .accordion-item:last-child { + border-bottom: 0; } + .accordion-flush > .accordion-item > .accordion-header .accordion-button, .accordion-flush > .accordion-item > .accordion-header .accordion-button.collapsed { + border-radius: 0; } + .accordion-flush > .accordion-item > .accordion-collapse { + border-radius: 0; } + +[data-bs-theme="dark"] .accordion-button::after { + --bs-accordion-btn-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%2374c4e3'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e"); + --bs-accordion-btn-active-icon: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%2374c4e3'%3e%3cpath fill-rule='evenodd' d='M1.646 4.646a.5.5 0 0 1 .708 0L8 10.293l5.646-5.647a.5.5 0 0 1 .708.708l-6 6a.5.5 0 0 1-.708 0l-6-6a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e"); } + +.breadcrumb { + --bs-breadcrumb-padding-x: 0; + --bs-breadcrumb-padding-y: 0; + 
--bs-breadcrumb-margin-bottom: 1rem; + --bs-breadcrumb-bg: ; + --bs-breadcrumb-border-radius: ; + --bs-breadcrumb-divider-color: var(--bs-secondary-color); + --bs-breadcrumb-item-padding-x: 0.5rem; + --bs-breadcrumb-item-active-color: var(--bs-secondary-color); + display: flex; + flex-wrap: wrap; + padding: var(--bs-breadcrumb-padding-y) var(--bs-breadcrumb-padding-x); + margin-bottom: var(--bs-breadcrumb-margin-bottom); + font-size: var(--bs-breadcrumb-font-size); + list-style: none; + background-color: var(--bs-breadcrumb-bg); + border-radius: var(--bs-breadcrumb-border-radius); } + +.breadcrumb-item + .breadcrumb-item { + padding-left: var(--bs-breadcrumb-item-padding-x); } + .breadcrumb-item + .breadcrumb-item::before { + float: left; + padding-right: var(--bs-breadcrumb-item-padding-x); + color: var(--bs-breadcrumb-divider-color); + content: var(--bs-breadcrumb-divider, "/") /* rtl: var(--bs-breadcrumb-divider, "/") */; } + +.breadcrumb-item.active { + color: var(--bs-breadcrumb-item-active-color); } + +.pagination { + --bs-pagination-padding-x: 0.75rem; + --bs-pagination-padding-y: 0.375rem; + --bs-pagination-font-size: 1rem; + --bs-pagination-color: #6c757d; + --bs-pagination-bg: var(--bs-body-bg); + --bs-pagination-border-width: var(--bs-border-width); + --bs-pagination-border-color: var(--bs-border-color); + --bs-pagination-border-radius: var(--bs-border-radius); + --bs-pagination-hover-color: var(--bs-link-hover-color); + --bs-pagination-hover-bg: var(--bs-tertiary-bg); + --bs-pagination-hover-border-color: var(--bs-border-color); + --bs-pagination-focus-color: var(--bs-link-hover-color); + --bs-pagination-focus-bg: var(--bs-secondary-bg); + --bs-pagination-focus-box-shadow: 0 0 0 0.25rem rgba(24, 157, 208, 0.25); + --bs-pagination-active-color: #fff; + --bs-pagination-active-bg: #189DD0; + --bs-pagination-active-border-color: #189DD0; + --bs-pagination-disabled-color: #dee2e6; + --bs-pagination-disabled-bg: var(--bs-secondary-bg); + 
--bs-pagination-disabled-border-color: var(--bs-border-color); + display: flex; + padding-left: 0; + list-style: none; } + +.page-link { + position: relative; + display: block; + padding: var(--bs-pagination-padding-y) var(--bs-pagination-padding-x); + font-size: var(--bs-pagination-font-size); + color: var(--bs-pagination-color); + text-decoration: none; + background-color: var(--bs-pagination-bg); + border: var(--bs-pagination-border-width) solid var(--bs-pagination-border-color); + transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; } + @media (prefers-reduced-motion: reduce) { + .page-link { + transition: none; } } + .page-link:hover { + z-index: 2; + color: var(--bs-pagination-hover-color); + background-color: var(--bs-pagination-hover-bg); + border-color: var(--bs-pagination-hover-border-color); } + .page-link:focus { + z-index: 3; + color: var(--bs-pagination-focus-color); + background-color: var(--bs-pagination-focus-bg); + outline: 0; + box-shadow: var(--bs-pagination-focus-box-shadow); } + .page-link.active, .active > .page-link { + z-index: 3; + color: var(--bs-pagination-active-color); + background-color: var(--bs-pagination-active-bg); + background-image: var(--bs-gradient); + border-color: var(--bs-pagination-active-border-color); } + .page-link.disabled, .disabled > .page-link { + color: var(--bs-pagination-disabled-color); + pointer-events: none; + background-color: var(--bs-pagination-disabled-bg); + border-color: var(--bs-pagination-disabled-border-color); } + +.page-item:not(:first-child) .page-link { + margin-left: calc(var(--bs-border-width) * -1); } + +.page-item:first-child .page-link { + border-top-left-radius: var(--bs-pagination-border-radius); + border-bottom-left-radius: var(--bs-pagination-border-radius); } + +.page-item:last-child .page-link { + border-top-right-radius: var(--bs-pagination-border-radius); + border-bottom-right-radius: 
var(--bs-pagination-border-radius); } + +.pagination-lg { + --bs-pagination-padding-x: 1.5rem; + --bs-pagination-padding-y: 0.75rem; + --bs-pagination-font-size: 1.25rem; + --bs-pagination-border-radius: var(--bs-border-radius-lg); } + +.pagination-sm { + --bs-pagination-padding-x: 0.5rem; + --bs-pagination-padding-y: 0.25rem; + --bs-pagination-font-size: 0.875rem; + --bs-pagination-border-radius: var(--bs-border-radius-sm); } + +.badge { + --bs-badge-padding-x: 0.65em; + --bs-badge-padding-y: 0.35em; + --bs-badge-font-size: 0.75em; + --bs-badge-font-weight: 700; + --bs-badge-color: #fff; + --bs-badge-border-radius: var(--bs-border-radius); + display: inline-block; + padding: var(--bs-badge-padding-y) var(--bs-badge-padding-x); + font-size: var(--bs-badge-font-size); + font-weight: var(--bs-badge-font-weight); + line-height: 1; + color: var(--bs-badge-color); + text-align: center; + white-space: nowrap; + vertical-align: baseline; + border-radius: var(--bs-badge-border-radius); + background-image: var(--bs-gradient); } + .badge:empty { + display: none; } + +.btn .badge, div.drawio button .badge, .td-blog .td-rss-button .badge { + position: relative; + top: -1px; } + +.alert { + --bs-alert-bg: transparent; + --bs-alert-padding-x: 1rem; + --bs-alert-padding-y: 1rem; + --bs-alert-margin-bottom: 1rem; + --bs-alert-color: inherit; + --bs-alert-border-color: transparent; + --bs-alert-border: var(--bs-border-width) solid var(--bs-alert-border-color); + --bs-alert-border-radius: var(--bs-border-radius); + --bs-alert-link-color: inherit; + position: relative; + padding: var(--bs-alert-padding-y) var(--bs-alert-padding-x); + margin-bottom: var(--bs-alert-margin-bottom); + color: var(--bs-alert-color); + background-color: var(--bs-alert-bg); + border: var(--bs-alert-border); + border-radius: var(--bs-alert-border-radius); } + +.alert-heading { + color: inherit; } + +.alert-link { + font-weight: 700; + color: var(--bs-alert-link-color); } + +.alert-dismissible { + 
padding-right: 3rem; } + .alert-dismissible .btn-close { + position: absolute; + top: 0; + right: 0; + z-index: 2; + padding: 1.25rem 1rem; } + +.alert-primary, .pageinfo-primary { + --bs-alert-color: var(--bs-primary-text-emphasis); + --bs-alert-bg: var(--bs-primary-bg-subtle); + --bs-alert-border-color: var(--bs-primary-border-subtle); + --bs-alert-link-color: var(--bs-primary-text-emphasis); } + +.alert-secondary, .pageinfo-secondary { + --bs-alert-color: var(--bs-secondary-text-emphasis); + --bs-alert-bg: var(--bs-secondary-bg-subtle); + --bs-alert-border-color: var(--bs-secondary-border-subtle); + --bs-alert-link-color: var(--bs-secondary-text-emphasis); } + +.alert-success, .pageinfo-success { + --bs-alert-color: var(--bs-success-text-emphasis); + --bs-alert-bg: var(--bs-success-bg-subtle); + --bs-alert-border-color: var(--bs-success-border-subtle); + --bs-alert-link-color: var(--bs-success-text-emphasis); } + +.alert-info, .pageinfo-info { + --bs-alert-color: var(--bs-info-text-emphasis); + --bs-alert-bg: var(--bs-info-bg-subtle); + --bs-alert-border-color: var(--bs-info-border-subtle); + --bs-alert-link-color: var(--bs-info-text-emphasis); } + +.alert-warning, .pageinfo-warning { + --bs-alert-color: var(--bs-warning-text-emphasis); + --bs-alert-bg: var(--bs-warning-bg-subtle); + --bs-alert-border-color: var(--bs-warning-border-subtle); + --bs-alert-link-color: var(--bs-warning-text-emphasis); } + +.alert-danger, .pageinfo-danger { + --bs-alert-color: var(--bs-danger-text-emphasis); + --bs-alert-bg: var(--bs-danger-bg-subtle); + --bs-alert-border-color: var(--bs-danger-border-subtle); + --bs-alert-link-color: var(--bs-danger-text-emphasis); } + +.alert-light, .pageinfo-light { + --bs-alert-color: var(--bs-light-text-emphasis); + --bs-alert-bg: var(--bs-light-bg-subtle); + --bs-alert-border-color: var(--bs-light-border-subtle); + --bs-alert-link-color: var(--bs-light-text-emphasis); } + +.alert-dark, .pageinfo-dark { + --bs-alert-color: 
var(--bs-dark-text-emphasis); + --bs-alert-bg: var(--bs-dark-bg-subtle); + --bs-alert-border-color: var(--bs-dark-border-subtle); + --bs-alert-link-color: var(--bs-dark-text-emphasis); } + +@keyframes progress-bar-stripes { + 0% { + background-position-x: 1rem; } } + +.progress, +.progress-stacked { + --bs-progress-height: 1rem; + --bs-progress-font-size: 0.75rem; + --bs-progress-bg: var(--bs-secondary-bg); + --bs-progress-border-radius: var(--bs-border-radius); + --bs-progress-box-shadow: var(--bs-box-shadow-inset); + --bs-progress-bar-color: #fff; + --bs-progress-bar-bg: #189DD0; + --bs-progress-bar-transition: width 0.6s ease; + display: flex; + height: var(--bs-progress-height); + overflow: hidden; + font-size: var(--bs-progress-font-size); + background-color: var(--bs-progress-bg); + border-radius: var(--bs-progress-border-radius); + box-shadow: var(--bs-progress-box-shadow); } + +.progress-bar { + display: flex; + flex-direction: column; + justify-content: center; + overflow: hidden; + color: var(--bs-progress-bar-color); + text-align: center; + white-space: nowrap; + background-color: var(--bs-progress-bar-bg); + transition: var(--bs-progress-bar-transition); } + @media (prefers-reduced-motion: reduce) { + .progress-bar { + transition: none; } } +.progress-bar-striped { + background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); + background-size: var(--bs-progress-height) var(--bs-progress-height); } + +.progress-stacked > .progress { + overflow: visible; } + +.progress-stacked > .progress > .progress-bar { + width: 100%; } + +.progress-bar-animated { + animation: 1s linear infinite progress-bar-stripes; } + @media (prefers-reduced-motion: reduce) { + .progress-bar-animated { + animation: none; } } +.list-group { + --bs-list-group-color: var(--bs-body-color); + --bs-list-group-bg: var(--bs-body-bg); + 
--bs-list-group-border-color: var(--bs-border-color); + --bs-list-group-border-width: var(--bs-border-width); + --bs-list-group-border-radius: var(--bs-border-radius); + --bs-list-group-item-padding-x: 1rem; + --bs-list-group-item-padding-y: 0.5rem; + --bs-list-group-action-color: var(--bs-secondary-color); + --bs-list-group-action-hover-color: var(--bs-emphasis-color); + --bs-list-group-action-hover-bg: var(--bs-tertiary-bg); + --bs-list-group-action-active-color: var(--bs-body-color); + --bs-list-group-action-active-bg: var(--bs-secondary-bg); + --bs-list-group-disabled-color: var(--bs-secondary-color); + --bs-list-group-disabled-bg: var(--bs-body-bg); + --bs-list-group-active-color: #fff; + --bs-list-group-active-bg: #189DD0; + --bs-list-group-active-border-color: #189DD0; + display: flex; + flex-direction: column; + padding-left: 0; + margin-bottom: 0; + border-radius: var(--bs-list-group-border-radius); } + +.list-group-numbered { + list-style-type: none; + counter-reset: section; } + .list-group-numbered > .list-group-item::before { + content: counters(section, ".") ". 
"; + counter-increment: section; } + +.list-group-item-action { + width: 100%; + color: var(--bs-list-group-action-color); + text-align: inherit; } + .list-group-item-action:hover, .list-group-item-action:focus { + z-index: 1; + color: var(--bs-list-group-action-hover-color); + text-decoration: none; + background-color: var(--bs-list-group-action-hover-bg); } + .list-group-item-action:active { + color: var(--bs-list-group-action-active-color); + background-color: var(--bs-list-group-action-active-bg); } + +.list-group-item { + position: relative; + display: block; + padding: var(--bs-list-group-item-padding-y) var(--bs-list-group-item-padding-x); + color: var(--bs-list-group-color); + text-decoration: none; + background-color: var(--bs-list-group-bg); + border: var(--bs-list-group-border-width) solid var(--bs-list-group-border-color); } + .list-group-item:first-child { + border-top-left-radius: inherit; + border-top-right-radius: inherit; } + .list-group-item:last-child { + border-bottom-right-radius: inherit; + border-bottom-left-radius: inherit; } + .list-group-item.disabled, .list-group-item:disabled { + color: var(--bs-list-group-disabled-color); + pointer-events: none; + background-color: var(--bs-list-group-disabled-bg); } + .list-group-item.active { + z-index: 2; + color: var(--bs-list-group-active-color); + background-color: var(--bs-list-group-active-bg); + border-color: var(--bs-list-group-active-border-color); } + .list-group-item + .list-group-item { + border-top-width: 0; } + .list-group-item + .list-group-item.active { + margin-top: calc(-1 * var(--bs-list-group-border-width)); + border-top-width: var(--bs-list-group-border-width); } + +.list-group-horizontal { + flex-direction: row; } + .list-group-horizontal > .list-group-item:first-child:not(:last-child) { + border-bottom-left-radius: var(--bs-list-group-border-radius); + border-top-right-radius: 0; } + .list-group-horizontal > .list-group-item:last-child:not(:first-child) { + 
border-top-right-radius: var(--bs-list-group-border-radius); + border-bottom-left-radius: 0; } + .list-group-horizontal > .list-group-item.active { + margin-top: 0; } + .list-group-horizontal > .list-group-item + .list-group-item { + border-top-width: var(--bs-list-group-border-width); + border-left-width: 0; } + .list-group-horizontal > .list-group-item + .list-group-item.active { + margin-left: calc(-1 * var(--bs-list-group-border-width)); + border-left-width: var(--bs-list-group-border-width); } + +@media (min-width: 576px) { + .list-group-horizontal-sm { + flex-direction: row; } + .list-group-horizontal-sm > .list-group-item:first-child:not(:last-child) { + border-bottom-left-radius: var(--bs-list-group-border-radius); + border-top-right-radius: 0; } + .list-group-horizontal-sm > .list-group-item:last-child:not(:first-child) { + border-top-right-radius: var(--bs-list-group-border-radius); + border-bottom-left-radius: 0; } + .list-group-horizontal-sm > .list-group-item.active { + margin-top: 0; } + .list-group-horizontal-sm > .list-group-item + .list-group-item { + border-top-width: var(--bs-list-group-border-width); + border-left-width: 0; } + .list-group-horizontal-sm > .list-group-item + .list-group-item.active { + margin-left: calc(-1 * var(--bs-list-group-border-width)); + border-left-width: var(--bs-list-group-border-width); } } + +@media (min-width: 768px) { + .list-group-horizontal-md { + flex-direction: row; } + .list-group-horizontal-md > .list-group-item:first-child:not(:last-child) { + border-bottom-left-radius: var(--bs-list-group-border-radius); + border-top-right-radius: 0; } + .list-group-horizontal-md > .list-group-item:last-child:not(:first-child) { + border-top-right-radius: var(--bs-list-group-border-radius); + border-bottom-left-radius: 0; } + .list-group-horizontal-md > .list-group-item.active { + margin-top: 0; } + .list-group-horizontal-md > .list-group-item + .list-group-item { + border-top-width: var(--bs-list-group-border-width); + 
border-left-width: 0; } + .list-group-horizontal-md > .list-group-item + .list-group-item.active { + margin-left: calc(-1 * var(--bs-list-group-border-width)); + border-left-width: var(--bs-list-group-border-width); } } + +@media (min-width: 992px) { + .list-group-horizontal-lg { + flex-direction: row; } + .list-group-horizontal-lg > .list-group-item:first-child:not(:last-child) { + border-bottom-left-radius: var(--bs-list-group-border-radius); + border-top-right-radius: 0; } + .list-group-horizontal-lg > .list-group-item:last-child:not(:first-child) { + border-top-right-radius: var(--bs-list-group-border-radius); + border-bottom-left-radius: 0; } + .list-group-horizontal-lg > .list-group-item.active { + margin-top: 0; } + .list-group-horizontal-lg > .list-group-item + .list-group-item { + border-top-width: var(--bs-list-group-border-width); + border-left-width: 0; } + .list-group-horizontal-lg > .list-group-item + .list-group-item.active { + margin-left: calc(-1 * var(--bs-list-group-border-width)); + border-left-width: var(--bs-list-group-border-width); } } + +@media (min-width: 1200px) { + .list-group-horizontal-xl { + flex-direction: row; } + .list-group-horizontal-xl > .list-group-item:first-child:not(:last-child) { + border-bottom-left-radius: var(--bs-list-group-border-radius); + border-top-right-radius: 0; } + .list-group-horizontal-xl > .list-group-item:last-child:not(:first-child) { + border-top-right-radius: var(--bs-list-group-border-radius); + border-bottom-left-radius: 0; } + .list-group-horizontal-xl > .list-group-item.active { + margin-top: 0; } + .list-group-horizontal-xl > .list-group-item + .list-group-item { + border-top-width: var(--bs-list-group-border-width); + border-left-width: 0; } + .list-group-horizontal-xl > .list-group-item + .list-group-item.active { + margin-left: calc(-1 * var(--bs-list-group-border-width)); + border-left-width: var(--bs-list-group-border-width); } } + +@media (min-width: 1400px) { + .list-group-horizontal-xxl { + 
flex-direction: row; } + .list-group-horizontal-xxl > .list-group-item:first-child:not(:last-child) { + border-bottom-left-radius: var(--bs-list-group-border-radius); + border-top-right-radius: 0; } + .list-group-horizontal-xxl > .list-group-item:last-child:not(:first-child) { + border-top-right-radius: var(--bs-list-group-border-radius); + border-bottom-left-radius: 0; } + .list-group-horizontal-xxl > .list-group-item.active { + margin-top: 0; } + .list-group-horizontal-xxl > .list-group-item + .list-group-item { + border-top-width: var(--bs-list-group-border-width); + border-left-width: 0; } + .list-group-horizontal-xxl > .list-group-item + .list-group-item.active { + margin-left: calc(-1 * var(--bs-list-group-border-width)); + border-left-width: var(--bs-list-group-border-width); } } + +.list-group-flush { + border-radius: 0; } + .list-group-flush > .list-group-item { + border-width: 0 0 var(--bs-list-group-border-width); } + .list-group-flush > .list-group-item:last-child { + border-bottom-width: 0; } + +.list-group-item-primary { + --bs-list-group-color: var(--bs-primary-text-emphasis); + --bs-list-group-bg: var(--bs-primary-bg-subtle); + --bs-list-group-border-color: var(--bs-primary-border-subtle); + --bs-list-group-action-hover-color: var(--bs-emphasis-color); + --bs-list-group-action-hover-bg: var(--bs-primary-border-subtle); + --bs-list-group-action-active-color: var(--bs-emphasis-color); + --bs-list-group-action-active-bg: var(--bs-primary-border-subtle); + --bs-list-group-active-color: var(--bs-primary-bg-subtle); + --bs-list-group-active-bg: var(--bs-primary-text-emphasis); + --bs-list-group-active-border-color: var(--bs-primary-text-emphasis); } + +.list-group-item-secondary { + --bs-list-group-color: var(--bs-secondary-text-emphasis); + --bs-list-group-bg: var(--bs-secondary-bg-subtle); + --bs-list-group-border-color: var(--bs-secondary-border-subtle); + --bs-list-group-action-hover-color: var(--bs-emphasis-color); + --bs-list-group-action-hover-bg: 
var(--bs-secondary-border-subtle); + --bs-list-group-action-active-color: var(--bs-emphasis-color); + --bs-list-group-action-active-bg: var(--bs-secondary-border-subtle); + --bs-list-group-active-color: var(--bs-secondary-bg-subtle); + --bs-list-group-active-bg: var(--bs-secondary-text-emphasis); + --bs-list-group-active-border-color: var(--bs-secondary-text-emphasis); } + +.list-group-item-success { + --bs-list-group-color: var(--bs-success-text-emphasis); + --bs-list-group-bg: var(--bs-success-bg-subtle); + --bs-list-group-border-color: var(--bs-success-border-subtle); + --bs-list-group-action-hover-color: var(--bs-emphasis-color); + --bs-list-group-action-hover-bg: var(--bs-success-border-subtle); + --bs-list-group-action-active-color: var(--bs-emphasis-color); + --bs-list-group-action-active-bg: var(--bs-success-border-subtle); + --bs-list-group-active-color: var(--bs-success-bg-subtle); + --bs-list-group-active-bg: var(--bs-success-text-emphasis); + --bs-list-group-active-border-color: var(--bs-success-text-emphasis); } + +.list-group-item-info { + --bs-list-group-color: var(--bs-info-text-emphasis); + --bs-list-group-bg: var(--bs-info-bg-subtle); + --bs-list-group-border-color: var(--bs-info-border-subtle); + --bs-list-group-action-hover-color: var(--bs-emphasis-color); + --bs-list-group-action-hover-bg: var(--bs-info-border-subtle); + --bs-list-group-action-active-color: var(--bs-emphasis-color); + --bs-list-group-action-active-bg: var(--bs-info-border-subtle); + --bs-list-group-active-color: var(--bs-info-bg-subtle); + --bs-list-group-active-bg: var(--bs-info-text-emphasis); + --bs-list-group-active-border-color: var(--bs-info-text-emphasis); } + +.list-group-item-warning { + --bs-list-group-color: var(--bs-warning-text-emphasis); + --bs-list-group-bg: var(--bs-warning-bg-subtle); + --bs-list-group-border-color: var(--bs-warning-border-subtle); + --bs-list-group-action-hover-color: var(--bs-emphasis-color); + --bs-list-group-action-hover-bg: 
var(--bs-warning-border-subtle); + --bs-list-group-action-active-color: var(--bs-emphasis-color); + --bs-list-group-action-active-bg: var(--bs-warning-border-subtle); + --bs-list-group-active-color: var(--bs-warning-bg-subtle); + --bs-list-group-active-bg: var(--bs-warning-text-emphasis); + --bs-list-group-active-border-color: var(--bs-warning-text-emphasis); } + +.list-group-item-danger { + --bs-list-group-color: var(--bs-danger-text-emphasis); + --bs-list-group-bg: var(--bs-danger-bg-subtle); + --bs-list-group-border-color: var(--bs-danger-border-subtle); + --bs-list-group-action-hover-color: var(--bs-emphasis-color); + --bs-list-group-action-hover-bg: var(--bs-danger-border-subtle); + --bs-list-group-action-active-color: var(--bs-emphasis-color); + --bs-list-group-action-active-bg: var(--bs-danger-border-subtle); + --bs-list-group-active-color: var(--bs-danger-bg-subtle); + --bs-list-group-active-bg: var(--bs-danger-text-emphasis); + --bs-list-group-active-border-color: var(--bs-danger-text-emphasis); } + +.list-group-item-light { + --bs-list-group-color: var(--bs-light-text-emphasis); + --bs-list-group-bg: var(--bs-light-bg-subtle); + --bs-list-group-border-color: var(--bs-light-border-subtle); + --bs-list-group-action-hover-color: var(--bs-emphasis-color); + --bs-list-group-action-hover-bg: var(--bs-light-border-subtle); + --bs-list-group-action-active-color: var(--bs-emphasis-color); + --bs-list-group-action-active-bg: var(--bs-light-border-subtle); + --bs-list-group-active-color: var(--bs-light-bg-subtle); + --bs-list-group-active-bg: var(--bs-light-text-emphasis); + --bs-list-group-active-border-color: var(--bs-light-text-emphasis); } + +.list-group-item-dark { + --bs-list-group-color: var(--bs-dark-text-emphasis); + --bs-list-group-bg: var(--bs-dark-bg-subtle); + --bs-list-group-border-color: var(--bs-dark-border-subtle); + --bs-list-group-action-hover-color: var(--bs-emphasis-color); + --bs-list-group-action-hover-bg: var(--bs-dark-border-subtle); + 
--bs-list-group-action-active-color: var(--bs-emphasis-color); + --bs-list-group-action-active-bg: var(--bs-dark-border-subtle); + --bs-list-group-active-color: var(--bs-dark-bg-subtle); + --bs-list-group-active-bg: var(--bs-dark-text-emphasis); + --bs-list-group-active-border-color: var(--bs-dark-text-emphasis); } + +.btn-close { + --bs-btn-close-color: #000; + --bs-btn-close-bg: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23000'%3e%3cpath d='M.293.293a1 1 0 0 1 1.414 0L8 6.586 14.293.293a1 1 0 1 1 1.414 1.414L9.414 8l6.293 6.293a1 1 0 0 1-1.414 1.414L8 9.414l-6.293 6.293a1 1 0 0 1-1.414-1.414L6.586 8 .293 1.707a1 1 0 0 1 0-1.414z'/%3e%3c/svg%3e"); + --bs-btn-close-opacity: 0.5; + --bs-btn-close-hover-opacity: 0.75; + --bs-btn-close-focus-shadow: 0 0 0 0.25rem rgba(24, 157, 208, 0.25); + --bs-btn-close-focus-opacity: 1; + --bs-btn-close-disabled-opacity: 0.25; + --bs-btn-close-white-filter: invert(1) grayscale(100%) brightness(200%); + box-sizing: content-box; + width: 1em; + height: 1em; + padding: 0.25em 0.25em; + color: var(--bs-btn-close-color); + background: transparent var(--bs-btn-close-bg) center/1em auto no-repeat; + border: 0; + border-radius: 0.375rem; + opacity: var(--bs-btn-close-opacity); } + .btn-close:hover { + color: var(--bs-btn-close-color); + text-decoration: none; + opacity: var(--bs-btn-close-hover-opacity); } + .btn-close:focus { + outline: 0; + box-shadow: var(--bs-btn-close-focus-shadow); + opacity: var(--bs-btn-close-focus-opacity); } + .btn-close:disabled, .btn-close.disabled { + pointer-events: none; + user-select: none; + opacity: var(--bs-btn-close-disabled-opacity); } + +.btn-close-white { + filter: var(--bs-btn-close-white-filter); } + +[data-bs-theme="dark"] .btn-close { + filter: var(--bs-btn-close-white-filter); } + +.toast { + --bs-toast-zindex: 1090; + --bs-toast-padding-x: 0.75rem; + --bs-toast-padding-y: 0.5rem; + --bs-toast-spacing: 1.5rem; + --bs-toast-max-width: 350px; + 
--bs-toast-font-size: 0.875rem; + --bs-toast-color: ; + --bs-toast-bg: rgba(var(--bs-body-bg-rgb), 0.85); + --bs-toast-border-width: var(--bs-border-width); + --bs-toast-border-color: var(--bs-border-color-translucent); + --bs-toast-border-radius: var(--bs-border-radius); + --bs-toast-box-shadow: var(--bs-box-shadow); + --bs-toast-header-color: var(--bs-secondary-color); + --bs-toast-header-bg: rgba(var(--bs-body-bg-rgb), 0.85); + --bs-toast-header-border-color: var(--bs-border-color-translucent); + width: var(--bs-toast-max-width); + max-width: 100%; + font-size: var(--bs-toast-font-size); + color: var(--bs-toast-color); + pointer-events: auto; + background-color: var(--bs-toast-bg); + background-clip: padding-box; + border: var(--bs-toast-border-width) solid var(--bs-toast-border-color); + box-shadow: var(--bs-toast-box-shadow); + border-radius: var(--bs-toast-border-radius); } + .toast.showing { + opacity: 0; } + .toast:not(.show) { + display: none; } + +.toast-container { + --bs-toast-zindex: 1090; + position: absolute; + z-index: var(--bs-toast-zindex); + width: max-content; + max-width: 100%; + pointer-events: none; } + .toast-container > :not(:last-child) { + margin-bottom: var(--bs-toast-spacing); } + +.toast-header { + display: flex; + align-items: center; + padding: var(--bs-toast-padding-y) var(--bs-toast-padding-x); + color: var(--bs-toast-header-color); + background-color: var(--bs-toast-header-bg); + background-clip: padding-box; + border-bottom: var(--bs-toast-border-width) solid var(--bs-toast-header-border-color); + border-top-left-radius: calc(var(--bs-toast-border-radius) - var(--bs-toast-border-width)); + border-top-right-radius: calc(var(--bs-toast-border-radius) - var(--bs-toast-border-width)); } + .toast-header .btn-close { + margin-right: calc(-.5 * var(--bs-toast-padding-x)); + margin-left: var(--bs-toast-padding-x); } + +.toast-body { + padding: var(--bs-toast-padding-x); + word-wrap: break-word; } + +.modal { + --bs-modal-zindex: 1055; + 
--bs-modal-width: 500px; + --bs-modal-padding: 1rem; + --bs-modal-margin: 0.5rem; + --bs-modal-color: ; + --bs-modal-bg: var(--bs-body-bg); + --bs-modal-border-color: var(--bs-border-color-translucent); + --bs-modal-border-width: var(--bs-border-width); + --bs-modal-border-radius: var(--bs-border-radius-lg); + --bs-modal-box-shadow: var(--bs-box-shadow-sm); + --bs-modal-inner-border-radius: calc(var(--bs-border-radius-lg) - (var(--bs-border-width))); + --bs-modal-header-padding-x: 1rem; + --bs-modal-header-padding-y: 1rem; + --bs-modal-header-padding: 1rem 1rem; + --bs-modal-header-border-color: var(--bs-border-color); + --bs-modal-header-border-width: var(--bs-border-width); + --bs-modal-title-line-height: 1.5; + --bs-modal-footer-gap: 0.5rem; + --bs-modal-footer-bg: ; + --bs-modal-footer-border-color: var(--bs-border-color); + --bs-modal-footer-border-width: var(--bs-border-width); + position: fixed; + top: 0; + left: 0; + z-index: var(--bs-modal-zindex); + display: none; + width: 100%; + height: 100%; + overflow-x: hidden; + overflow-y: auto; + outline: 0; } + +.modal-dialog { + position: relative; + width: auto; + margin: var(--bs-modal-margin); + pointer-events: none; } + .modal.fade .modal-dialog { + transition: transform 0.3s ease-out; + transform: translate(0, -50px); } + @media (prefers-reduced-motion: reduce) { + .modal.fade .modal-dialog { + transition: none; } } + .modal.show .modal-dialog { + transform: none; } + .modal.modal-static .modal-dialog { + transform: scale(1.02); } + +.modal-dialog-scrollable { + height: calc(100% - var(--bs-modal-margin) * 2); } + .modal-dialog-scrollable .modal-content { + max-height: 100%; + overflow: hidden; } + .modal-dialog-scrollable .modal-body { + overflow-y: auto; } + +.modal-dialog-centered { + display: flex; + align-items: center; + min-height: calc(100% - var(--bs-modal-margin) * 2); } + +.modal-content { + position: relative; + display: flex; + flex-direction: column; + width: 100%; + color: 
var(--bs-modal-color); + pointer-events: auto; + background-color: var(--bs-modal-bg); + background-clip: padding-box; + border: var(--bs-modal-border-width) solid var(--bs-modal-border-color); + border-radius: var(--bs-modal-border-radius); + box-shadow: var(--bs-modal-box-shadow); + outline: 0; } + +.modal-backdrop { + --bs-backdrop-zindex: 1050; + --bs-backdrop-bg: #000; + --bs-backdrop-opacity: 0.5; + position: fixed; + top: 0; + left: 0; + z-index: var(--bs-backdrop-zindex); + width: 100vw; + height: 100vh; + background-color: var(--bs-backdrop-bg); } + .modal-backdrop.fade { + opacity: 0; } + .modal-backdrop.show { + opacity: var(--bs-backdrop-opacity); } + +.modal-header { + display: flex; + flex-shrink: 0; + align-items: center; + padding: var(--bs-modal-header-padding); + border-bottom: var(--bs-modal-header-border-width) solid var(--bs-modal-header-border-color); + border-top-left-radius: var(--bs-modal-inner-border-radius); + border-top-right-radius: var(--bs-modal-inner-border-radius); } + .modal-header .btn-close { + padding: calc(var(--bs-modal-header-padding-y) * .5) calc(var(--bs-modal-header-padding-x) * .5); + margin: calc(-.5 * var(--bs-modal-header-padding-y)) calc(-.5 * var(--bs-modal-header-padding-x)) calc(-.5 * var(--bs-modal-header-padding-y)) auto; } + +.modal-title { + margin-bottom: 0; + line-height: var(--bs-modal-title-line-height); } + +.modal-body { + position: relative; + flex: 1 1 auto; + padding: var(--bs-modal-padding); } + +.modal-footer { + display: flex; + flex-shrink: 0; + flex-wrap: wrap; + align-items: center; + justify-content: flex-end; + padding: calc(var(--bs-modal-padding) - var(--bs-modal-footer-gap) * .5); + background-color: var(--bs-modal-footer-bg); + border-top: var(--bs-modal-footer-border-width) solid var(--bs-modal-footer-border-color); + border-bottom-right-radius: var(--bs-modal-inner-border-radius); + border-bottom-left-radius: var(--bs-modal-inner-border-radius); } + .modal-footer > * { + margin: 
calc(var(--bs-modal-footer-gap) * .5); } + +@media (min-width: 576px) { + .modal { + --bs-modal-margin: 1.75rem; + --bs-modal-box-shadow: var(--bs-box-shadow); } + .modal-dialog { + max-width: var(--bs-modal-width); + margin-right: auto; + margin-left: auto; } + .modal-sm { + --bs-modal-width: 300px; } } + +@media (min-width: 992px) { + .modal-lg, + .modal-xl { + --bs-modal-width: 800px; } } + +@media (min-width: 1200px) { + .modal-xl { + --bs-modal-width: 1140px; } } + +.modal-fullscreen { + width: 100vw; + max-width: none; + height: 100%; + margin: 0; } + .modal-fullscreen .modal-content { + height: 100%; + border: 0; + border-radius: 0; } + .modal-fullscreen .modal-header, + .modal-fullscreen .modal-footer { + border-radius: 0; } + .modal-fullscreen .modal-body { + overflow-y: auto; } + +@media (max-width: 575.98px) { + .modal-fullscreen-sm-down { + width: 100vw; + max-width: none; + height: 100%; + margin: 0; } + .modal-fullscreen-sm-down .modal-content { + height: 100%; + border: 0; + border-radius: 0; } + .modal-fullscreen-sm-down .modal-header, + .modal-fullscreen-sm-down .modal-footer { + border-radius: 0; } + .modal-fullscreen-sm-down .modal-body { + overflow-y: auto; } } + +@media (max-width: 767.98px) { + .modal-fullscreen-md-down { + width: 100vw; + max-width: none; + height: 100%; + margin: 0; } + .modal-fullscreen-md-down .modal-content { + height: 100%; + border: 0; + border-radius: 0; } + .modal-fullscreen-md-down .modal-header, + .modal-fullscreen-md-down .modal-footer { + border-radius: 0; } + .modal-fullscreen-md-down .modal-body { + overflow-y: auto; } } + +@media (max-width: 991.98px) { + .modal-fullscreen-lg-down { + width: 100vw; + max-width: none; + height: 100%; + margin: 0; } + .modal-fullscreen-lg-down .modal-content { + height: 100%; + border: 0; + border-radius: 0; } + .modal-fullscreen-lg-down .modal-header, + .modal-fullscreen-lg-down .modal-footer { + border-radius: 0; } + .modal-fullscreen-lg-down .modal-body { + overflow-y: auto; } 
} + +@media (max-width: 1199.98px) { + .modal-fullscreen-xl-down { + width: 100vw; + max-width: none; + height: 100%; + margin: 0; } + .modal-fullscreen-xl-down .modal-content { + height: 100%; + border: 0; + border-radius: 0; } + .modal-fullscreen-xl-down .modal-header, + .modal-fullscreen-xl-down .modal-footer { + border-radius: 0; } + .modal-fullscreen-xl-down .modal-body { + overflow-y: auto; } } + +@media (max-width: 1399.98px) { + .modal-fullscreen-xxl-down { + width: 100vw; + max-width: none; + height: 100%; + margin: 0; } + .modal-fullscreen-xxl-down .modal-content { + height: 100%; + border: 0; + border-radius: 0; } + .modal-fullscreen-xxl-down .modal-header, + .modal-fullscreen-xxl-down .modal-footer { + border-radius: 0; } + .modal-fullscreen-xxl-down .modal-body { + overflow-y: auto; } } + +.tooltip { + --bs-tooltip-zindex: 1080; + --bs-tooltip-max-width: 200px; + --bs-tooltip-padding-x: 0.5rem; + --bs-tooltip-padding-y: 0.25rem; + --bs-tooltip-margin: ; + --bs-tooltip-font-size: 0.875rem; + --bs-tooltip-color: var(--bs-body-bg); + --bs-tooltip-bg: var(--bs-emphasis-color); + --bs-tooltip-border-radius: var(--bs-border-radius); + --bs-tooltip-opacity: 0.9; + --bs-tooltip-arrow-width: 0.8rem; + --bs-tooltip-arrow-height: 0.4rem; + z-index: var(--bs-tooltip-zindex); + display: block; + margin: var(--bs-tooltip-margin); + font-family: "Arimo", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-style: normal; + font-weight: 400; + line-height: 1.5; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + white-space: normal; + word-spacing: normal; + line-break: auto; + font-size: var(--bs-tooltip-font-size); + word-wrap: break-word; + opacity: 0; } + .tooltip.show { + opacity: var(--bs-tooltip-opacity); } + .tooltip .tooltip-arrow { + display: block; 
+ width: var(--bs-tooltip-arrow-width); + height: var(--bs-tooltip-arrow-height); } + .tooltip .tooltip-arrow::before { + position: absolute; + content: ""; + border-color: transparent; + border-style: solid; } + +.bs-tooltip-top .tooltip-arrow, .bs-tooltip-auto[data-popper-placement^="top"] .tooltip-arrow { + bottom: calc(-1 * var(--bs-tooltip-arrow-height)); } + .bs-tooltip-top .tooltip-arrow::before, .bs-tooltip-auto[data-popper-placement^="top"] .tooltip-arrow::before { + top: -1px; + border-width: var(--bs-tooltip-arrow-height) calc(var(--bs-tooltip-arrow-width) * .5) 0; + border-top-color: var(--bs-tooltip-bg); } + +/* rtl:begin:ignore */ +.bs-tooltip-end .tooltip-arrow, .bs-tooltip-auto[data-popper-placement^="right"] .tooltip-arrow { + left: calc(-1 * var(--bs-tooltip-arrow-height)); + width: var(--bs-tooltip-arrow-height); + height: var(--bs-tooltip-arrow-width); } + .bs-tooltip-end .tooltip-arrow::before, .bs-tooltip-auto[data-popper-placement^="right"] .tooltip-arrow::before { + right: -1px; + border-width: calc(var(--bs-tooltip-arrow-width) * .5) var(--bs-tooltip-arrow-height) calc(var(--bs-tooltip-arrow-width) * .5) 0; + border-right-color: var(--bs-tooltip-bg); } + +/* rtl:end:ignore */ +.bs-tooltip-bottom .tooltip-arrow, .bs-tooltip-auto[data-popper-placement^="bottom"] .tooltip-arrow { + top: calc(-1 * var(--bs-tooltip-arrow-height)); } + .bs-tooltip-bottom .tooltip-arrow::before, .bs-tooltip-auto[data-popper-placement^="bottom"] .tooltip-arrow::before { + bottom: -1px; + border-width: 0 calc(var(--bs-tooltip-arrow-width) * .5) var(--bs-tooltip-arrow-height); + border-bottom-color: var(--bs-tooltip-bg); } + +/* rtl:begin:ignore */ +.bs-tooltip-start .tooltip-arrow, .bs-tooltip-auto[data-popper-placement^="left"] .tooltip-arrow { + right: calc(-1 * var(--bs-tooltip-arrow-height)); + width: var(--bs-tooltip-arrow-height); + height: var(--bs-tooltip-arrow-width); } + .bs-tooltip-start .tooltip-arrow::before, 
.bs-tooltip-auto[data-popper-placement^="left"] .tooltip-arrow::before { + left: -1px; + border-width: calc(var(--bs-tooltip-arrow-width) * .5) 0 calc(var(--bs-tooltip-arrow-width) * .5) var(--bs-tooltip-arrow-height); + border-left-color: var(--bs-tooltip-bg); } + +/* rtl:end:ignore */ +.tooltip-inner { + max-width: var(--bs-tooltip-max-width); + padding: var(--bs-tooltip-padding-y) var(--bs-tooltip-padding-x); + color: var(--bs-tooltip-color); + text-align: center; + background-color: var(--bs-tooltip-bg); + border-radius: var(--bs-tooltip-border-radius); } + +.popover { + --bs-popover-zindex: 1070; + --bs-popover-max-width: 276px; + --bs-popover-font-size: 0.875rem; + --bs-popover-bg: var(--bs-body-bg); + --bs-popover-border-width: var(--bs-border-width); + --bs-popover-border-color: var(--bs-border-color-translucent); + --bs-popover-border-radius: var(--bs-border-radius-lg); + --bs-popover-inner-border-radius: calc(var(--bs-border-radius-lg) - var(--bs-border-width)); + --bs-popover-box-shadow: var(--bs-box-shadow); + --bs-popover-header-padding-x: 1rem; + --bs-popover-header-padding-y: 0.5rem; + --bs-popover-header-font-size: 1rem; + --bs-popover-header-color: inherit; + --bs-popover-header-bg: var(--bs-secondary-bg); + --bs-popover-body-padding-x: 1rem; + --bs-popover-body-padding-y: 1rem; + --bs-popover-body-color: var(--bs-body-color); + --bs-popover-arrow-width: 1rem; + --bs-popover-arrow-height: 0.5rem; + --bs-popover-arrow-border: var(--bs-popover-border-color); + z-index: var(--bs-popover-zindex); + display: block; + max-width: var(--bs-popover-max-width); + font-family: "Arimo", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-style: normal; + font-weight: 400; + line-height: 1.5; + text-align: left; + text-align: start; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-break: normal; + 
white-space: normal; + word-spacing: normal; + line-break: auto; + font-size: var(--bs-popover-font-size); + word-wrap: break-word; + background-color: var(--bs-popover-bg); + background-clip: padding-box; + border: var(--bs-popover-border-width) solid var(--bs-popover-border-color); + border-radius: var(--bs-popover-border-radius); + box-shadow: var(--bs-popover-box-shadow); } + .popover .popover-arrow { + display: block; + width: var(--bs-popover-arrow-width); + height: var(--bs-popover-arrow-height); } + .popover .popover-arrow::before, .popover .popover-arrow::after { + position: absolute; + display: block; + content: ""; + border-color: transparent; + border-style: solid; + border-width: 0; } + +.bs-popover-top > .popover-arrow, .bs-popover-auto[data-popper-placement^="top"] > .popover-arrow { + bottom: calc(-1 * (var(--bs-popover-arrow-height)) - var(--bs-popover-border-width)); } + .bs-popover-top > .popover-arrow::before, .bs-popover-auto[data-popper-placement^="top"] > .popover-arrow::before, .bs-popover-top > .popover-arrow::after, .bs-popover-auto[data-popper-placement^="top"] > .popover-arrow::after { + border-width: var(--bs-popover-arrow-height) calc(var(--bs-popover-arrow-width) * .5) 0; } + .bs-popover-top > .popover-arrow::before, .bs-popover-auto[data-popper-placement^="top"] > .popover-arrow::before { + bottom: 0; + border-top-color: var(--bs-popover-arrow-border); } + .bs-popover-top > .popover-arrow::after, .bs-popover-auto[data-popper-placement^="top"] > .popover-arrow::after { + bottom: var(--bs-popover-border-width); + border-top-color: var(--bs-popover-bg); } + +/* rtl:begin:ignore */ +.bs-popover-end > .popover-arrow, .bs-popover-auto[data-popper-placement^="right"] > .popover-arrow { + left: calc(-1 * (var(--bs-popover-arrow-height)) - var(--bs-popover-border-width)); + width: var(--bs-popover-arrow-height); + height: var(--bs-popover-arrow-width); } + .bs-popover-end > .popover-arrow::before, 
.bs-popover-auto[data-popper-placement^="right"] > .popover-arrow::before, .bs-popover-end > .popover-arrow::after, .bs-popover-auto[data-popper-placement^="right"] > .popover-arrow::after { + border-width: calc(var(--bs-popover-arrow-width) * .5) var(--bs-popover-arrow-height) calc(var(--bs-popover-arrow-width) * .5) 0; } + .bs-popover-end > .popover-arrow::before, .bs-popover-auto[data-popper-placement^="right"] > .popover-arrow::before { + left: 0; + border-right-color: var(--bs-popover-arrow-border); } + .bs-popover-end > .popover-arrow::after, .bs-popover-auto[data-popper-placement^="right"] > .popover-arrow::after { + left: var(--bs-popover-border-width); + border-right-color: var(--bs-popover-bg); } + +/* rtl:end:ignore */ +.bs-popover-bottom > .popover-arrow, .bs-popover-auto[data-popper-placement^="bottom"] > .popover-arrow { + top: calc(-1 * (var(--bs-popover-arrow-height)) - var(--bs-popover-border-width)); } + .bs-popover-bottom > .popover-arrow::before, .bs-popover-auto[data-popper-placement^="bottom"] > .popover-arrow::before, .bs-popover-bottom > .popover-arrow::after, .bs-popover-auto[data-popper-placement^="bottom"] > .popover-arrow::after { + border-width: 0 calc(var(--bs-popover-arrow-width) * .5) var(--bs-popover-arrow-height); } + .bs-popover-bottom > .popover-arrow::before, .bs-popover-auto[data-popper-placement^="bottom"] > .popover-arrow::before { + top: 0; + border-bottom-color: var(--bs-popover-arrow-border); } + .bs-popover-bottom > .popover-arrow::after, .bs-popover-auto[data-popper-placement^="bottom"] > .popover-arrow::after { + top: var(--bs-popover-border-width); + border-bottom-color: var(--bs-popover-bg); } + +.bs-popover-bottom .popover-header::before, .bs-popover-auto[data-popper-placement^="bottom"] .popover-header::before { + position: absolute; + top: 0; + left: 50%; + display: block; + width: var(--bs-popover-arrow-width); + margin-left: calc(-.5 * var(--bs-popover-arrow-width)); + content: ""; + border-bottom: 
var(--bs-popover-border-width) solid var(--bs-popover-header-bg); } + +/* rtl:begin:ignore */ +.bs-popover-start > .popover-arrow, .bs-popover-auto[data-popper-placement^="left"] > .popover-arrow { + right: calc(-1 * (var(--bs-popover-arrow-height)) - var(--bs-popover-border-width)); + width: var(--bs-popover-arrow-height); + height: var(--bs-popover-arrow-width); } + .bs-popover-start > .popover-arrow::before, .bs-popover-auto[data-popper-placement^="left"] > .popover-arrow::before, .bs-popover-start > .popover-arrow::after, .bs-popover-auto[data-popper-placement^="left"] > .popover-arrow::after { + border-width: calc(var(--bs-popover-arrow-width) * .5) 0 calc(var(--bs-popover-arrow-width) * .5) var(--bs-popover-arrow-height); } + .bs-popover-start > .popover-arrow::before, .bs-popover-auto[data-popper-placement^="left"] > .popover-arrow::before { + right: 0; + border-left-color: var(--bs-popover-arrow-border); } + .bs-popover-start > .popover-arrow::after, .bs-popover-auto[data-popper-placement^="left"] > .popover-arrow::after { + right: var(--bs-popover-border-width); + border-left-color: var(--bs-popover-bg); } + +/* rtl:end:ignore */ +.popover-header { + padding: var(--bs-popover-header-padding-y) var(--bs-popover-header-padding-x); + margin-bottom: 0; + font-size: var(--bs-popover-header-font-size); + color: var(--bs-popover-header-color); + background-color: var(--bs-popover-header-bg); + border-bottom: var(--bs-popover-border-width) solid var(--bs-popover-border-color); + border-top-left-radius: var(--bs-popover-inner-border-radius); + border-top-right-radius: var(--bs-popover-inner-border-radius); } + .popover-header:empty { + display: none; } + +.popover-body { + padding: var(--bs-popover-body-padding-y) var(--bs-popover-body-padding-x); + color: var(--bs-popover-body-color); } + +.carousel { + position: relative; } + +.carousel.pointer-event { + touch-action: pan-y; } + +.carousel-inner { + position: relative; + width: 100%; + overflow: hidden; } + 
.carousel-inner::after { + display: block; + clear: both; + content: ""; } + +.carousel-item { + position: relative; + display: none; + float: left; + width: 100%; + margin-right: -100%; + backface-visibility: hidden; + transition: transform 0.6s ease-in-out; } + @media (prefers-reduced-motion: reduce) { + .carousel-item { + transition: none; } } +.carousel-item.active, +.carousel-item-next, +.carousel-item-prev { + display: block; } + +.carousel-item-next:not(.carousel-item-start), +.active.carousel-item-end { + transform: translateX(100%); } + +.carousel-item-prev:not(.carousel-item-end), +.active.carousel-item-start { + transform: translateX(-100%); } + +.carousel-fade .carousel-item { + opacity: 0; + transition-property: opacity; + transform: none; } + +.carousel-fade .carousel-item.active, +.carousel-fade .carousel-item-next.carousel-item-start, +.carousel-fade .carousel-item-prev.carousel-item-end { + z-index: 1; + opacity: 1; } + +.carousel-fade .active.carousel-item-start, +.carousel-fade .active.carousel-item-end { + z-index: 0; + opacity: 0; + transition: opacity 0s 0.6s; } + @media (prefers-reduced-motion: reduce) { + .carousel-fade .active.carousel-item-start, + .carousel-fade .active.carousel-item-end { + transition: none; } } +.carousel-control-prev, +.carousel-control-next { + position: absolute; + top: 0; + bottom: 0; + z-index: 1; + display: flex; + align-items: center; + justify-content: center; + width: 15%; + padding: 0; + color: #fff; + text-align: center; + background: none; + border: 0; + opacity: 0.5; + transition: opacity 0.15s ease; } + @media (prefers-reduced-motion: reduce) { + .carousel-control-prev, + .carousel-control-next { + transition: none; } } + .carousel-control-prev:hover, .carousel-control-prev:focus, + .carousel-control-next:hover, + .carousel-control-next:focus { + color: #fff; + text-decoration: none; + outline: 0; + opacity: 0.9; } + +.carousel-control-prev { + left: 0; + background-image: linear-gradient(90deg, rgba(0, 0, 
0, 0.25), rgba(0, 0, 0, 0.001)); } + +.carousel-control-next { + right: 0; + background-image: linear-gradient(270deg, rgba(0, 0, 0, 0.25), rgba(0, 0, 0, 0.001)); } + +.carousel-control-prev-icon, +.carousel-control-next-icon { + display: inline-block; + width: 2rem; + height: 2rem; + background-repeat: no-repeat; + background-position: 50%; + background-size: 100% 100%; } + +.carousel-control-prev-icon { + background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e") /*rtl:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e")*/; } + +.carousel-control-next-icon { + background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M4.646 1.646a.5.5 0 0 1 .708 0l6 6a.5.5 0 0 1 0 .708l-6 6a.5.5 0 0 1-.708-.708L10.293 8 4.646 2.354a.5.5 0 0 1 0-.708z'/%3e%3c/svg%3e") /*rtl:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 16 16' fill='%23fff'%3e%3cpath d='M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z'/%3e%3c/svg%3e")*/; } + +.carousel-indicators { + position: absolute; + right: 0; + bottom: 0; + left: 0; + z-index: 2; + display: flex; + justify-content: center; + padding: 0; + margin-right: 15%; + margin-bottom: 1rem; + margin-left: 15%; } + .carousel-indicators [data-bs-target] { + box-sizing: content-box; + flex: 0 1 auto; + width: 30px; + height: 3px; + padding: 0; + margin-right: 3px; + margin-left: 3px; + text-indent: -999px; + cursor: pointer; + background-color: #fff; + background-clip: padding-box; + 
border: 0; + border-top: 10px solid transparent; + border-bottom: 10px solid transparent; + opacity: 0.5; + transition: opacity 0.6s ease; } + @media (prefers-reduced-motion: reduce) { + .carousel-indicators [data-bs-target] { + transition: none; } } + .carousel-indicators .active { + opacity: 1; } + +.carousel-caption { + position: absolute; + right: 15%; + bottom: 1.25rem; + left: 15%; + padding-top: 1.25rem; + padding-bottom: 1.25rem; + color: #fff; + text-align: center; } + +.carousel-dark .carousel-control-prev-icon, +.carousel-dark .carousel-control-next-icon { + filter: invert(1) grayscale(100); } + +.carousel-dark .carousel-indicators [data-bs-target] { + background-color: #000; } + +.carousel-dark .carousel-caption { + color: #000; } + +[data-bs-theme="dark"] .carousel .carousel-control-prev-icon, +[data-bs-theme="dark"] .carousel .carousel-control-next-icon, [data-bs-theme="dark"].carousel .carousel-control-prev-icon, +[data-bs-theme="dark"].carousel .carousel-control-next-icon { + filter: invert(1) grayscale(100); } + +[data-bs-theme="dark"] .carousel .carousel-indicators [data-bs-target], [data-bs-theme="dark"].carousel .carousel-indicators [data-bs-target] { + background-color: #000; } + +[data-bs-theme="dark"] .carousel .carousel-caption, [data-bs-theme="dark"].carousel .carousel-caption { + color: #000; } + +.spinner-grow, +.spinner-border { + display: inline-block; + width: var(--bs-spinner-width); + height: var(--bs-spinner-height); + vertical-align: var(--bs-spinner-vertical-align); + border-radius: 50%; + animation: var(--bs-spinner-animation-speed) linear infinite var(--bs-spinner-animation-name); } + +@keyframes spinner-border { + to { + transform: rotate(360deg) /* rtl:ignore */; } } + +.spinner-border { + --bs-spinner-width: 2rem; + --bs-spinner-height: 2rem; + --bs-spinner-vertical-align: -0.125em; + --bs-spinner-border-width: 0.25em; + --bs-spinner-animation-speed: 0.75s; + --bs-spinner-animation-name: spinner-border; + border: 
var(--bs-spinner-border-width) solid currentcolor; + border-right-color: transparent; } + +.spinner-border-sm { + --bs-spinner-width: 1rem; + --bs-spinner-height: 1rem; + --bs-spinner-border-width: 0.2em; } + +@keyframes spinner-grow { + 0% { + transform: scale(0); } + 50% { + opacity: 1; + transform: none; } } + +.spinner-grow { + --bs-spinner-width: 2rem; + --bs-spinner-height: 2rem; + --bs-spinner-vertical-align: -0.125em; + --bs-spinner-animation-speed: 0.75s; + --bs-spinner-animation-name: spinner-grow; + background-color: currentcolor; + opacity: 0; } + +.spinner-grow-sm { + --bs-spinner-width: 1rem; + --bs-spinner-height: 1rem; } + +@media (prefers-reduced-motion: reduce) { + .spinner-border, + .spinner-grow { + --bs-spinner-animation-speed: 1.5s; } } + +.offcanvas, .offcanvas-xxl, .offcanvas-xl, .offcanvas-lg, .offcanvas-md, .offcanvas-sm { + --bs-offcanvas-zindex: 1045; + --bs-offcanvas-width: 400px; + --bs-offcanvas-height: 30vh; + --bs-offcanvas-padding-x: 1rem; + --bs-offcanvas-padding-y: 1rem; + --bs-offcanvas-color: var(--bs-body-color); + --bs-offcanvas-bg: var(--bs-body-bg); + --bs-offcanvas-border-width: var(--bs-border-width); + --bs-offcanvas-border-color: var(--bs-border-color-translucent); + --bs-offcanvas-box-shadow: var(--bs-box-shadow-sm); + --bs-offcanvas-transition: transform 0.3s ease-in-out; + --bs-offcanvas-title-line-height: 1.5; } + +@media (max-width: 575.98px) { + .offcanvas-sm { + position: fixed; + bottom: 0; + z-index: var(--bs-offcanvas-zindex); + display: flex; + flex-direction: column; + max-width: 100%; + color: var(--bs-offcanvas-color); + visibility: hidden; + background-color: var(--bs-offcanvas-bg); + background-clip: padding-box; + outline: 0; + box-shadow: var(--bs-offcanvas-box-shadow); + transition: var(--bs-offcanvas-transition); } } + @media (max-width: 575.98px) and (prefers-reduced-motion: reduce) { + .offcanvas-sm { + transition: none; } } +@media (max-width: 575.98px) { + .offcanvas-sm.offcanvas-start { + top: 
0; + left: 0; + width: var(--bs-offcanvas-width); + border-right: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateX(-100%); } + .offcanvas-sm.offcanvas-end { + top: 0; + right: 0; + width: var(--bs-offcanvas-width); + border-left: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateX(100%); } + .offcanvas-sm.offcanvas-top { + top: 0; + right: 0; + left: 0; + height: var(--bs-offcanvas-height); + max-height: 100%; + border-bottom: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateY(-100%); } + .offcanvas-sm.offcanvas-bottom { + right: 0; + left: 0; + height: var(--bs-offcanvas-height); + max-height: 100%; + border-top: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateY(100%); } + .offcanvas-sm.showing, .offcanvas-sm.show:not(.hiding) { + transform: none; } + .offcanvas-sm.showing, .offcanvas-sm.hiding, .offcanvas-sm.show { + visibility: visible; } } + +@media (min-width: 576px) { + .offcanvas-sm { + --bs-offcanvas-height: auto; + --bs-offcanvas-border-width: 0; + background-color: transparent !important; } + .offcanvas-sm .offcanvas-header { + display: none; } + .offcanvas-sm .offcanvas-body { + display: flex; + flex-grow: 0; + padding: 0; + overflow-y: visible; + background-color: transparent !important; } } + +@media (max-width: 767.98px) { + .offcanvas-md { + position: fixed; + bottom: 0; + z-index: var(--bs-offcanvas-zindex); + display: flex; + flex-direction: column; + max-width: 100%; + color: var(--bs-offcanvas-color); + visibility: hidden; + background-color: var(--bs-offcanvas-bg); + background-clip: padding-box; + outline: 0; + box-shadow: var(--bs-offcanvas-box-shadow); + transition: var(--bs-offcanvas-transition); } } + @media (max-width: 767.98px) and (prefers-reduced-motion: reduce) { + .offcanvas-md { + transition: none; } } +@media (max-width: 767.98px) { + 
.offcanvas-md.offcanvas-start { + top: 0; + left: 0; + width: var(--bs-offcanvas-width); + border-right: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateX(-100%); } + .offcanvas-md.offcanvas-end { + top: 0; + right: 0; + width: var(--bs-offcanvas-width); + border-left: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateX(100%); } + .offcanvas-md.offcanvas-top { + top: 0; + right: 0; + left: 0; + height: var(--bs-offcanvas-height); + max-height: 100%; + border-bottom: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateY(-100%); } + .offcanvas-md.offcanvas-bottom { + right: 0; + left: 0; + height: var(--bs-offcanvas-height); + max-height: 100%; + border-top: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateY(100%); } + .offcanvas-md.showing, .offcanvas-md.show:not(.hiding) { + transform: none; } + .offcanvas-md.showing, .offcanvas-md.hiding, .offcanvas-md.show { + visibility: visible; } } + +@media (min-width: 768px) { + .offcanvas-md { + --bs-offcanvas-height: auto; + --bs-offcanvas-border-width: 0; + background-color: transparent !important; } + .offcanvas-md .offcanvas-header { + display: none; } + .offcanvas-md .offcanvas-body { + display: flex; + flex-grow: 0; + padding: 0; + overflow-y: visible; + background-color: transparent !important; } } + +@media (max-width: 991.98px) { + .offcanvas-lg { + position: fixed; + bottom: 0; + z-index: var(--bs-offcanvas-zindex); + display: flex; + flex-direction: column; + max-width: 100%; + color: var(--bs-offcanvas-color); + visibility: hidden; + background-color: var(--bs-offcanvas-bg); + background-clip: padding-box; + outline: 0; + box-shadow: var(--bs-offcanvas-box-shadow); + transition: var(--bs-offcanvas-transition); } } + @media (max-width: 991.98px) and (prefers-reduced-motion: reduce) { + .offcanvas-lg { + transition: none; } } +@media 
(max-width: 991.98px) { + .offcanvas-lg.offcanvas-start { + top: 0; + left: 0; + width: var(--bs-offcanvas-width); + border-right: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateX(-100%); } + .offcanvas-lg.offcanvas-end { + top: 0; + right: 0; + width: var(--bs-offcanvas-width); + border-left: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateX(100%); } + .offcanvas-lg.offcanvas-top { + top: 0; + right: 0; + left: 0; + height: var(--bs-offcanvas-height); + max-height: 100%; + border-bottom: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateY(-100%); } + .offcanvas-lg.offcanvas-bottom { + right: 0; + left: 0; + height: var(--bs-offcanvas-height); + max-height: 100%; + border-top: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateY(100%); } + .offcanvas-lg.showing, .offcanvas-lg.show:not(.hiding) { + transform: none; } + .offcanvas-lg.showing, .offcanvas-lg.hiding, .offcanvas-lg.show { + visibility: visible; } } + +@media (min-width: 992px) { + .offcanvas-lg { + --bs-offcanvas-height: auto; + --bs-offcanvas-border-width: 0; + background-color: transparent !important; } + .offcanvas-lg .offcanvas-header { + display: none; } + .offcanvas-lg .offcanvas-body { + display: flex; + flex-grow: 0; + padding: 0; + overflow-y: visible; + background-color: transparent !important; } } + +@media (max-width: 1199.98px) { + .offcanvas-xl { + position: fixed; + bottom: 0; + z-index: var(--bs-offcanvas-zindex); + display: flex; + flex-direction: column; + max-width: 100%; + color: var(--bs-offcanvas-color); + visibility: hidden; + background-color: var(--bs-offcanvas-bg); + background-clip: padding-box; + outline: 0; + box-shadow: var(--bs-offcanvas-box-shadow); + transition: var(--bs-offcanvas-transition); } } + @media (max-width: 1199.98px) and (prefers-reduced-motion: reduce) { + .offcanvas-xl { + 
transition: none; } } +@media (max-width: 1199.98px) { + .offcanvas-xl.offcanvas-start { + top: 0; + left: 0; + width: var(--bs-offcanvas-width); + border-right: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateX(-100%); } + .offcanvas-xl.offcanvas-end { + top: 0; + right: 0; + width: var(--bs-offcanvas-width); + border-left: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateX(100%); } + .offcanvas-xl.offcanvas-top { + top: 0; + right: 0; + left: 0; + height: var(--bs-offcanvas-height); + max-height: 100%; + border-bottom: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateY(-100%); } + .offcanvas-xl.offcanvas-bottom { + right: 0; + left: 0; + height: var(--bs-offcanvas-height); + max-height: 100%; + border-top: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateY(100%); } + .offcanvas-xl.showing, .offcanvas-xl.show:not(.hiding) { + transform: none; } + .offcanvas-xl.showing, .offcanvas-xl.hiding, .offcanvas-xl.show { + visibility: visible; } } + +@media (min-width: 1200px) { + .offcanvas-xl { + --bs-offcanvas-height: auto; + --bs-offcanvas-border-width: 0; + background-color: transparent !important; } + .offcanvas-xl .offcanvas-header { + display: none; } + .offcanvas-xl .offcanvas-body { + display: flex; + flex-grow: 0; + padding: 0; + overflow-y: visible; + background-color: transparent !important; } } + +@media (max-width: 1399.98px) { + .offcanvas-xxl { + position: fixed; + bottom: 0; + z-index: var(--bs-offcanvas-zindex); + display: flex; + flex-direction: column; + max-width: 100%; + color: var(--bs-offcanvas-color); + visibility: hidden; + background-color: var(--bs-offcanvas-bg); + background-clip: padding-box; + outline: 0; + box-shadow: var(--bs-offcanvas-box-shadow); + transition: var(--bs-offcanvas-transition); } } + @media (max-width: 1399.98px) and (prefers-reduced-motion: 
reduce) { + .offcanvas-xxl { + transition: none; } } +@media (max-width: 1399.98px) { + .offcanvas-xxl.offcanvas-start { + top: 0; + left: 0; + width: var(--bs-offcanvas-width); + border-right: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateX(-100%); } + .offcanvas-xxl.offcanvas-end { + top: 0; + right: 0; + width: var(--bs-offcanvas-width); + border-left: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateX(100%); } + .offcanvas-xxl.offcanvas-top { + top: 0; + right: 0; + left: 0; + height: var(--bs-offcanvas-height); + max-height: 100%; + border-bottom: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateY(-100%); } + .offcanvas-xxl.offcanvas-bottom { + right: 0; + left: 0; + height: var(--bs-offcanvas-height); + max-height: 100%; + border-top: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateY(100%); } + .offcanvas-xxl.showing, .offcanvas-xxl.show:not(.hiding) { + transform: none; } + .offcanvas-xxl.showing, .offcanvas-xxl.hiding, .offcanvas-xxl.show { + visibility: visible; } } + +@media (min-width: 1400px) { + .offcanvas-xxl { + --bs-offcanvas-height: auto; + --bs-offcanvas-border-width: 0; + background-color: transparent !important; } + .offcanvas-xxl .offcanvas-header { + display: none; } + .offcanvas-xxl .offcanvas-body { + display: flex; + flex-grow: 0; + padding: 0; + overflow-y: visible; + background-color: transparent !important; } } + +.offcanvas { + position: fixed; + bottom: 0; + z-index: var(--bs-offcanvas-zindex); + display: flex; + flex-direction: column; + max-width: 100%; + color: var(--bs-offcanvas-color); + visibility: hidden; + background-color: var(--bs-offcanvas-bg); + background-clip: padding-box; + outline: 0; + box-shadow: var(--bs-offcanvas-box-shadow); + transition: var(--bs-offcanvas-transition); } + @media (prefers-reduced-motion: reduce) { + .offcanvas { 
+ transition: none; } } + .offcanvas.offcanvas-start { + top: 0; + left: 0; + width: var(--bs-offcanvas-width); + border-right: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateX(-100%); } + .offcanvas.offcanvas-end { + top: 0; + right: 0; + width: var(--bs-offcanvas-width); + border-left: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateX(100%); } + .offcanvas.offcanvas-top { + top: 0; + right: 0; + left: 0; + height: var(--bs-offcanvas-height); + max-height: 100%; + border-bottom: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateY(-100%); } + .offcanvas.offcanvas-bottom { + right: 0; + left: 0; + height: var(--bs-offcanvas-height); + max-height: 100%; + border-top: var(--bs-offcanvas-border-width) solid var(--bs-offcanvas-border-color); + transform: translateY(100%); } + .offcanvas.showing, .offcanvas.show:not(.hiding) { + transform: none; } + .offcanvas.showing, .offcanvas.hiding, .offcanvas.show { + visibility: visible; } + +.offcanvas-backdrop { + position: fixed; + top: 0; + left: 0; + z-index: 1040; + width: 100vw; + height: 100vh; + background-color: #000; } + .offcanvas-backdrop.fade { + opacity: 0; } + .offcanvas-backdrop.show { + opacity: 0.5; } + +.offcanvas-header { + display: flex; + align-items: center; + padding: var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x); } + .offcanvas-header .btn-close { + padding: calc(var(--bs-offcanvas-padding-y) * .5) calc(var(--bs-offcanvas-padding-x) * .5); + margin: calc(-.5 * var(--bs-offcanvas-padding-y)) calc(-.5 * var(--bs-offcanvas-padding-x)) calc(-.5 * var(--bs-offcanvas-padding-y)) auto; } + +.offcanvas-title { + margin-bottom: 0; + line-height: var(--bs-offcanvas-title-line-height); } + +.offcanvas-body { + flex-grow: 1; + padding: var(--bs-offcanvas-padding-y) var(--bs-offcanvas-padding-x); + overflow-y: auto; } + +.placeholder { + display: inline-block; + 
min-height: 1em; + vertical-align: middle; + cursor: wait; + background-color: currentcolor; + opacity: 0.5; } + .placeholder.btn::before, div.drawio button.placeholder::before, .td-blog .placeholder.td-rss-button::before { + display: inline-block; + content: ""; } + +.placeholder-xs { + min-height: .6em; } + +.placeholder-sm { + min-height: .8em; } + +.placeholder-lg { + min-height: 1.2em; } + +.placeholder-glow .placeholder { + animation: placeholder-glow 2s ease-in-out infinite; } + +@keyframes placeholder-glow { + 50% { + opacity: 0.2; } } + +.placeholder-wave { + mask-image: linear-gradient(130deg, #000 55%, rgba(0, 0, 0, 0.8) 75%, #000 95%); + mask-size: 200% 100%; + animation: placeholder-wave 2s linear infinite; } + +@keyframes placeholder-wave { + 100% { + mask-position: -200% 0%; } } + +.clearfix::after { + display: block; + clear: both; + content: ""; } + +.text-bg-primary { + color: #000 !important; + background-color: RGBA(var(--bs-primary-rgb), var(--bs-bg-opacity, 1)) !important; } + +.text-bg-secondary { + color: #000 !important; + background-color: RGBA(var(--bs-secondary-rgb), var(--bs-bg-opacity, 1)) !important; } + +.text-bg-success { + color: #000 !important; + background-color: RGBA(var(--bs-success-rgb), var(--bs-bg-opacity, 1)) !important; } + +.text-bg-info { + color: #fff !important; + background-color: RGBA(var(--bs-info-rgb), var(--bs-bg-opacity, 1)) !important; } + +.text-bg-warning { + color: #000 !important; + background-color: RGBA(var(--bs-warning-rgb), var(--bs-bg-opacity, 1)) !important; } + +.text-bg-danger { + color: #000 !important; + background-color: RGBA(var(--bs-danger-rgb), var(--bs-bg-opacity, 1)) !important; } + +.text-bg-light { + color: #000 !important; + background-color: RGBA(var(--bs-light-rgb), var(--bs-bg-opacity, 1)) !important; } + +.text-bg-dark { + color: #fff !important; + background-color: RGBA(var(--bs-dark-rgb), var(--bs-bg-opacity, 1)) !important; } + +.link-primary { + color: RGBA(var(--bs-primary-rgb), 
var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(var(--bs-primary-rgb), var(--bs-link-underline-opacity, 1)) !important; } + .link-primary:hover, .link-primary:focus { + color: RGBA(93, 186, 222, var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(93, 186, 222, var(--bs-link-underline-opacity, 1)) !important; } + +.link-secondary { + color: RGBA(var(--bs-secondary-rgb), var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(var(--bs-secondary-rgb), var(--bs-link-underline-opacity, 1)) !important; } + .link-secondary:hover, .link-secondary:focus { + color: RGBA(255, 219, 77, var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(255, 219, 77, var(--bs-link-underline-opacity, 1)) !important; } + +.link-success { + color: RGBA(var(--bs-success-rgb), var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(var(--bs-success-rgb), var(--bs-link-underline-opacity, 1)) !important; } + .link-success:hover, .link-success:focus { + color: RGBA(141, 189, 89, var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(141, 189, 89, var(--bs-link-underline-opacity, 1)) !important; } + +.link-info { + color: RGBA(var(--bs-info-rgb), var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(var(--bs-info-rgb), var(--bs-link-underline-opacity, 1)) !important; } + .link-info:hover, .link-info:focus { + color: RGBA(71, 81, 81, var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(71, 81, 81, var(--bs-link-underline-opacity, 1)) !important; } + +.link-warning { + color: RGBA(var(--bs-warning-rgb), var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(var(--bs-warning-rgb), var(--bs-link-underline-opacity, 1)) !important; } + .link-warning:hover, .link-warning:focus { + color: RGBA(242, 151, 140, var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(242, 151, 140, var(--bs-link-underline-opacity, 1)) !important; } + +.link-danger { + color: 
RGBA(var(--bs-danger-rgb), var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(var(--bs-danger-rgb), var(--bs-link-underline-opacity, 1)) !important; } + .link-danger:hover, .link-danger:focus { + color: RGBA(254, 128, 135, var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(254, 128, 135, var(--bs-link-underline-opacity, 1)) !important; } + +.link-light { + color: RGBA(var(--bs-light-rgb), var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(var(--bs-light-rgb), var(--bs-link-underline-opacity, 1)) !important; } + .link-light:hover, .link-light:focus { + color: RGBA(224, 247, 243, var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(224, 247, 243, var(--bs-link-underline-opacity, 1)) !important; } + +.link-dark { + color: RGBA(var(--bs-dark-rgb), var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(var(--bs-dark-rgb), var(--bs-link-underline-opacity, 1)) !important; } + .link-dark:hover, .link-dark:focus { + color: RGBA(45, 44, 53, var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(45, 44, 53, var(--bs-link-underline-opacity, 1)) !important; } + +.link-body-emphasis { + color: RGBA(var(--bs-emphasis-color-rgb), var(--bs-link-opacity, 1)) !important; + text-decoration-color: RGBA(var(--bs-emphasis-color-rgb), var(--bs-link-underline-opacity, 1)) !important; } + .link-body-emphasis:hover, .link-body-emphasis:focus { + color: RGBA(var(--bs-emphasis-color-rgb), var(--bs-link-opacity, 0.75)) !important; + text-decoration-color: RGBA(var(--bs-emphasis-color-rgb), var(--bs-link-underline-opacity, 0.75)) !important; } + +.focus-ring:focus { + outline: 0; + box-shadow: var(--bs-focus-ring-x, 0) var(--bs-focus-ring-y, 0) var(--bs-focus-ring-blur, 0) var(--bs-focus-ring-width) var(--bs-focus-ring-color); } + +.icon-link { + display: inline-flex; + gap: 0.375rem; + align-items: center; + text-decoration-color: rgba(var(--bs-link-color-rgb), var(--bs-link-opacity, 0.5)); + 
text-underline-offset: 0.25em; + backface-visibility: hidden; } + .icon-link > .bi { + flex-shrink: 0; + width: 1em; + height: 1em; + fill: currentcolor; + transition: 0.2s ease-in-out transform; } + @media (prefers-reduced-motion: reduce) { + .icon-link > .bi { + transition: none; } } +.icon-link-hover:hover > .bi, .icon-link-hover:focus-visible > .bi { + transform: var(--bs-icon-link-transform, translate3d(0.25em, 0, 0)); } + +.ratio { + position: relative; + width: 100%; } + .ratio::before { + display: block; + padding-top: var(--bs-aspect-ratio); + content: ""; } + .ratio > * { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; } + +.ratio-1x1 { + --bs-aspect-ratio: 100%; } + +.ratio-4x3 { + --bs-aspect-ratio: calc(3 / 4 * 100%); } + +.ratio-16x9 { + --bs-aspect-ratio: calc(9 / 16 * 100%); } + +.ratio-21x9 { + --bs-aspect-ratio: calc(9 / 21 * 100%); } + +.fixed-top { + position: fixed; + top: 0; + right: 0; + left: 0; + z-index: 1030; } + +.fixed-bottom { + position: fixed; + right: 0; + bottom: 0; + left: 0; + z-index: 1030; } + +.sticky-top { + position: sticky; + top: 0; + z-index: 1020; } + +.sticky-bottom { + position: sticky; + bottom: 0; + z-index: 1020; } + +@media (min-width: 576px) { + .sticky-sm-top { + position: sticky; + top: 0; + z-index: 1020; } + .sticky-sm-bottom { + position: sticky; + bottom: 0; + z-index: 1020; } } + +@media (min-width: 768px) { + .sticky-md-top { + position: sticky; + top: 0; + z-index: 1020; } + .sticky-md-bottom { + position: sticky; + bottom: 0; + z-index: 1020; } } + +@media (min-width: 992px) { + .sticky-lg-top { + position: sticky; + top: 0; + z-index: 1020; } + .sticky-lg-bottom { + position: sticky; + bottom: 0; + z-index: 1020; } } + +@media (min-width: 1200px) { + .sticky-xl-top { + position: sticky; + top: 0; + z-index: 1020; } + .sticky-xl-bottom { + position: sticky; + bottom: 0; + z-index: 1020; } } + +@media (min-width: 1400px) { + .sticky-xxl-top { + position: sticky; + top: 0; + 
z-index: 1020; } + .sticky-xxl-bottom { + position: sticky; + bottom: 0; + z-index: 1020; } } + +.hstack { + display: flex; + flex-direction: row; + align-items: center; + align-self: stretch; } + +.vstack { + display: flex; + flex: 1 1 auto; + flex-direction: column; + align-self: stretch; } + +.visually-hidden, +.visually-hidden-focusable:not(:focus):not(:focus-within) { + width: 1px !important; + height: 1px !important; + padding: 0 !important; + margin: -1px !important; + overflow: hidden !important; + clip: rect(0, 0, 0, 0) !important; + white-space: nowrap !important; + border: 0 !important; } + .visually-hidden:not(caption), + .visually-hidden-focusable:not(:focus):not(:focus-within):not(caption) { + position: absolute !important; } + +.stretched-link::after { + position: absolute; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: 1; + content: ""; } + +.text-truncate { + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; } + +.vr { + display: inline-block; + align-self: stretch; + width: var(--bs-border-width); + min-height: 1em; + background-color: currentcolor; + opacity: 0.25; } + +.align-baseline { + vertical-align: baseline !important; } + +.align-top { + vertical-align: top !important; } + +.align-middle { + vertical-align: middle !important; } + +.align-bottom { + vertical-align: bottom !important; } + +.align-text-bottom { + vertical-align: text-bottom !important; } + +.align-text-top { + vertical-align: text-top !important; } + +.float-start { + float: left !important; } + +.float-end { + float: right !important; } + +.float-none { + float: none !important; } + +.object-fit-contain { + object-fit: contain !important; } + +.object-fit-cover { + object-fit: cover !important; } + +.object-fit-fill { + object-fit: fill !important; } + +.object-fit-scale { + object-fit: scale-down !important; } + +.object-fit-none { + object-fit: none !important; } + +.opacity-0 { + opacity: 0 !important; } + +.opacity-25 { + opacity: 0.25 
!important; } + +.opacity-50 { + opacity: 0.5 !important; } + +.opacity-75 { + opacity: 0.75 !important; } + +.opacity-100 { + opacity: 1 !important; } + +.overflow-auto { + overflow: auto !important; } + +.overflow-hidden { + overflow: hidden !important; } + +.overflow-visible { + overflow: visible !important; } + +.overflow-scroll { + overflow: scroll !important; } + +.overflow-x-auto { + overflow-x: auto !important; } + +.overflow-x-hidden { + overflow-x: hidden !important; } + +.overflow-x-visible { + overflow-x: visible !important; } + +.overflow-x-scroll { + overflow-x: scroll !important; } + +.overflow-y-auto { + overflow-y: auto !important; } + +.overflow-y-hidden { + overflow-y: hidden !important; } + +.overflow-y-visible { + overflow-y: visible !important; } + +.overflow-y-scroll { + overflow-y: scroll !important; } + +.d-inline { + display: inline !important; } + +.d-inline-block { + display: inline-block !important; } + +.d-block { + display: block !important; } + +.d-grid { + display: grid !important; } + +.d-inline-grid { + display: inline-grid !important; } + +.d-table { + display: table !important; } + +.d-table-row { + display: table-row !important; } + +.d-table-cell { + display: table-cell !important; } + +.d-flex { + display: flex !important; } + +.d-inline-flex { + display: inline-flex !important; } + +.d-none { + display: none !important; } + +.shadow { + box-shadow: var(--bs-box-shadow) !important; } + +.shadow-sm { + box-shadow: var(--bs-box-shadow-sm) !important; } + +.shadow-lg { + box-shadow: var(--bs-box-shadow-lg) !important; } + +.shadow-none { + box-shadow: none !important; } + +.focus-ring-primary { + --bs-focus-ring-color: rgba(var(--bs-primary-rgb), var(--bs-focus-ring-opacity)); } + +.focus-ring-secondary { + --bs-focus-ring-color: rgba(var(--bs-secondary-rgb), var(--bs-focus-ring-opacity)); } + +.focus-ring-success { + --bs-focus-ring-color: rgba(var(--bs-success-rgb), var(--bs-focus-ring-opacity)); } + +.focus-ring-info { + 
--bs-focus-ring-color: rgba(var(--bs-info-rgb), var(--bs-focus-ring-opacity)); } + +.focus-ring-warning { + --bs-focus-ring-color: rgba(var(--bs-warning-rgb), var(--bs-focus-ring-opacity)); } + +.focus-ring-danger { + --bs-focus-ring-color: rgba(var(--bs-danger-rgb), var(--bs-focus-ring-opacity)); } + +.focus-ring-light { + --bs-focus-ring-color: rgba(var(--bs-light-rgb), var(--bs-focus-ring-opacity)); } + +.focus-ring-dark { + --bs-focus-ring-color: rgba(var(--bs-dark-rgb), var(--bs-focus-ring-opacity)); } + +.position-static { + position: static !important; } + +.position-relative { + position: relative !important; } + +.position-absolute { + position: absolute !important; } + +.position-fixed { + position: fixed !important; } + +.position-sticky { + position: sticky !important; } + +.top-0 { + top: 0 !important; } + +.top-50 { + top: 50% !important; } + +.top-100 { + top: 100% !important; } + +.bottom-0 { + bottom: 0 !important; } + +.bottom-50 { + bottom: 50% !important; } + +.bottom-100 { + bottom: 100% !important; } + +.start-0 { + left: 0 !important; } + +.start-50 { + left: 50% !important; } + +.start-100 { + left: 100% !important; } + +.end-0 { + right: 0 !important; } + +.end-50 { + right: 50% !important; } + +.end-100 { + right: 100% !important; } + +.translate-middle { + transform: translate(-50%, -50%) !important; } + +.translate-middle-x { + transform: translateX(-50%) !important; } + +.translate-middle-y { + transform: translateY(-50%) !important; } + +.border { + border: var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important; } + +.border-0 { + border: 0 !important; } + +.border-top, .td-page-meta__lastmod { + border-top: var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important; } + +.border-top-0 { + border-top: 0 !important; } + +.border-end { + border-right: var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important; } + +.border-end-0 { + border-right: 0 !important; } + 
+.border-bottom { + border-bottom: var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important; } + +.border-bottom-0 { + border-bottom: 0 !important; } + +.border-start { + border-left: var(--bs-border-width) var(--bs-border-style) var(--bs-border-color) !important; } + +.border-start-0 { + border-left: 0 !important; } + +.border-primary { + --bs-border-opacity: 1; + border-color: rgba(var(--bs-primary-rgb), var(--bs-border-opacity)) !important; } + +.border-secondary { + --bs-border-opacity: 1; + border-color: rgba(var(--bs-secondary-rgb), var(--bs-border-opacity)) !important; } + +.border-success { + --bs-border-opacity: 1; + border-color: rgba(var(--bs-success-rgb), var(--bs-border-opacity)) !important; } + +.border-info { + --bs-border-opacity: 1; + border-color: rgba(var(--bs-info-rgb), var(--bs-border-opacity)) !important; } + +.border-warning { + --bs-border-opacity: 1; + border-color: rgba(var(--bs-warning-rgb), var(--bs-border-opacity)) !important; } + +.border-danger { + --bs-border-opacity: 1; + border-color: rgba(var(--bs-danger-rgb), var(--bs-border-opacity)) !important; } + +.border-light { + --bs-border-opacity: 1; + border-color: rgba(var(--bs-light-rgb), var(--bs-border-opacity)) !important; } + +.border-dark { + --bs-border-opacity: 1; + border-color: rgba(var(--bs-dark-rgb), var(--bs-border-opacity)) !important; } + +.border-black { + --bs-border-opacity: 1; + border-color: rgba(var(--bs-black-rgb), var(--bs-border-opacity)) !important; } + +.border-white { + --bs-border-opacity: 1; + border-color: rgba(var(--bs-white-rgb), var(--bs-border-opacity)) !important; } + +.border-primary-subtle { + border-color: var(--bs-primary-border-subtle) !important; } + +.border-secondary-subtle { + border-color: var(--bs-secondary-border-subtle) !important; } + +.border-success-subtle { + border-color: var(--bs-success-border-subtle) !important; } + +.border-info-subtle { + border-color: var(--bs-info-border-subtle) !important; } + 
+.border-warning-subtle { + border-color: var(--bs-warning-border-subtle) !important; } + +.border-danger-subtle { + border-color: var(--bs-danger-border-subtle) !important; } + +.border-light-subtle { + border-color: var(--bs-light-border-subtle) !important; } + +.border-dark-subtle { + border-color: var(--bs-dark-border-subtle) !important; } + +.border-1 { + border-width: 1px !important; } + +.border-2 { + border-width: 2px !important; } + +.border-3 { + border-width: 3px !important; } + +.border-4 { + border-width: 4px !important; } + +.border-5 { + border-width: 5px !important; } + +.border-opacity-10 { + --bs-border-opacity: 0.1; } + +.border-opacity-25 { + --bs-border-opacity: 0.25; } + +.border-opacity-50 { + --bs-border-opacity: 0.5; } + +.border-opacity-75 { + --bs-border-opacity: 0.75; } + +.border-opacity-100 { + --bs-border-opacity: 1; } + +.w-25 { + width: 25% !important; } + +.w-50 { + width: 50% !important; } + +.w-75 { + width: 75% !important; } + +.w-100 { + width: 100% !important; } + +.w-auto { + width: auto !important; } + +.mw-100 { + max-width: 100% !important; } + +.vw-100 { + width: 100vw !important; } + +.min-vw-100 { + min-width: 100vw !important; } + +.h-25 { + height: 25% !important; } + +.h-50 { + height: 50% !important; } + +.h-75 { + height: 75% !important; } + +.h-100 { + height: 100% !important; } + +.h-auto { + height: auto !important; } + +.mh-100 { + max-height: 100% !important; } + +.vh-100 { + height: 100vh !important; } + +.min-vh-100 { + min-height: 100vh !important; } + +.flex-fill { + flex: 1 1 auto !important; } + +.flex-row { + flex-direction: row !important; } + +.flex-column { + flex-direction: column !important; } + +.flex-row-reverse { + flex-direction: row-reverse !important; } + +.flex-column-reverse { + flex-direction: column-reverse !important; } + +.flex-grow-0 { + flex-grow: 0 !important; } + +.flex-grow-1 { + flex-grow: 1 !important; } + +.flex-shrink-0 { + flex-shrink: 0 !important; } + +.flex-shrink-1 { + 
flex-shrink: 1 !important; } + +.flex-wrap { + flex-wrap: wrap !important; } + +.flex-nowrap { + flex-wrap: nowrap !important; } + +.flex-wrap-reverse { + flex-wrap: wrap-reverse !important; } + +.justify-content-start { + justify-content: flex-start !important; } + +.justify-content-end { + justify-content: flex-end !important; } + +.justify-content-center { + justify-content: center !important; } + +.justify-content-between { + justify-content: space-between !important; } + +.justify-content-around { + justify-content: space-around !important; } + +.justify-content-evenly { + justify-content: space-evenly !important; } + +.align-items-start { + align-items: flex-start !important; } + +.align-items-end { + align-items: flex-end !important; } + +.align-items-center { + align-items: center !important; } + +.align-items-baseline { + align-items: baseline !important; } + +.align-items-stretch { + align-items: stretch !important; } + +.align-content-start { + align-content: flex-start !important; } + +.align-content-end { + align-content: flex-end !important; } + +.align-content-center { + align-content: center !important; } + +.align-content-between { + align-content: space-between !important; } + +.align-content-around { + align-content: space-around !important; } + +.align-content-stretch { + align-content: stretch !important; } + +.align-self-auto { + align-self: auto !important; } + +.align-self-start { + align-self: flex-start !important; } + +.align-self-end { + align-self: flex-end !important; } + +.align-self-center { + align-self: center !important; } + +.align-self-baseline { + align-self: baseline !important; } + +.align-self-stretch { + align-self: stretch !important; } + +.order-first { + order: -1 !important; } + +.order-0 { + order: 0 !important; } + +.order-1 { + order: 1 !important; } + +.order-2 { + order: 2 !important; } + +.order-3 { + order: 3 !important; } + +.order-4 { + order: 4 !important; } + +.order-5 { + order: 5 !important; } + 
+.order-last { + order: 6 !important; } + +.m-0 { + margin: 0 !important; } + +.m-1 { + margin: 0.25rem !important; } + +.m-2 { + margin: 0.5rem !important; } + +.m-3 { + margin: 1rem !important; } + +.m-4 { + margin: 1.5rem !important; } + +.m-5 { + margin: 3rem !important; } + +.m-auto { + margin: auto !important; } + +.mx-0 { + margin-right: 0 !important; + margin-left: 0 !important; } + +.mx-1 { + margin-right: 0.25rem !important; + margin-left: 0.25rem !important; } + +.mx-2 { + margin-right: 0.5rem !important; + margin-left: 0.5rem !important; } + +.mx-3 { + margin-right: 1rem !important; + margin-left: 1rem !important; } + +.mx-4 { + margin-right: 1.5rem !important; + margin-left: 1.5rem !important; } + +.mx-5 { + margin-right: 3rem !important; + margin-left: 3rem !important; } + +.mx-auto { + margin-right: auto !important; + margin-left: auto !important; } + +.my-0 { + margin-top: 0 !important; + margin-bottom: 0 !important; } + +.my-1 { + margin-top: 0.25rem !important; + margin-bottom: 0.25rem !important; } + +.my-2 { + margin-top: 0.5rem !important; + margin-bottom: 0.5rem !important; } + +.my-3 { + margin-top: 1rem !important; + margin-bottom: 1rem !important; } + +.my-4 { + margin-top: 1.5rem !important; + margin-bottom: 1.5rem !important; } + +.my-5 { + margin-top: 3rem !important; + margin-bottom: 3rem !important; } + +.my-auto { + margin-top: auto !important; + margin-bottom: auto !important; } + +.mt-0 { + margin-top: 0 !important; } + +.mt-1 { + margin-top: 0.25rem !important; } + +.mt-2 { + margin-top: 0.5rem !important; } + +.mt-3 { + margin-top: 1rem !important; } + +.mt-4 { + margin-top: 1.5rem !important; } + +.mt-5 { + margin-top: 3rem !important; } + +.mt-auto { + margin-top: auto !important; } + +.me-0 { + margin-right: 0 !important; } + +.me-1 { + margin-right: 0.25rem !important; } + +.me-2 { + margin-right: 0.5rem !important; } + +.me-3 { + margin-right: 1rem !important; } + +.me-4 { + margin-right: 1.5rem !important; } + +.me-5 { + 
margin-right: 3rem !important; } + +.me-auto { + margin-right: auto !important; } + +.mb-0 { + margin-bottom: 0 !important; } + +.mb-1 { + margin-bottom: 0.25rem !important; } + +.mb-2 { + margin-bottom: 0.5rem !important; } + +.mb-3 { + margin-bottom: 1rem !important; } + +.mb-4 { + margin-bottom: 1.5rem !important; } + +.mb-5 { + margin-bottom: 3rem !important; } + +.mb-auto { + margin-bottom: auto !important; } + +.ms-0 { + margin-left: 0 !important; } + +.ms-1 { + margin-left: 0.25rem !important; } + +.ms-2 { + margin-left: 0.5rem !important; } + +.ms-3 { + margin-left: 1rem !important; } + +.ms-4 { + margin-left: 1.5rem !important; } + +.ms-5 { + margin-left: 3rem !important; } + +.ms-auto { + margin-left: auto !important; } + +.p-0 { + padding: 0 !important; } + +.p-1 { + padding: 0.25rem !important; } + +.p-2 { + padding: 0.5rem !important; } + +.p-3 { + padding: 1rem !important; } + +.p-4 { + padding: 1.5rem !important; } + +.p-5 { + padding: 3rem !important; } + +.px-0 { + padding-right: 0 !important; + padding-left: 0 !important; } + +.px-1 { + padding-right: 0.25rem !important; + padding-left: 0.25rem !important; } + +.px-2 { + padding-right: 0.5rem !important; + padding-left: 0.5rem !important; } + +.px-3 { + padding-right: 1rem !important; + padding-left: 1rem !important; } + +.px-4 { + padding-right: 1.5rem !important; + padding-left: 1.5rem !important; } + +.px-5 { + padding-right: 3rem !important; + padding-left: 3rem !important; } + +.py-0 { + padding-top: 0 !important; + padding-bottom: 0 !important; } + +.py-1 { + padding-top: 0.25rem !important; + padding-bottom: 0.25rem !important; } + +.py-2 { + padding-top: 0.5rem !important; + padding-bottom: 0.5rem !important; } + +.py-3 { + padding-top: 1rem !important; + padding-bottom: 1rem !important; } + +.py-4 { + padding-top: 1.5rem !important; + padding-bottom: 1.5rem !important; } + +.py-5 { + padding-top: 3rem !important; + padding-bottom: 3rem !important; } + +.pt-0 { + padding-top: 0 !important; 
} + +.pt-1 { + padding-top: 0.25rem !important; } + +.pt-2 { + padding-top: 0.5rem !important; } + +.pt-3 { + padding-top: 1rem !important; } + +.pt-4 { + padding-top: 1.5rem !important; } + +.pt-5 { + padding-top: 3rem !important; } + +.pe-0 { + padding-right: 0 !important; } + +.pe-1 { + padding-right: 0.25rem !important; } + +.pe-2 { + padding-right: 0.5rem !important; } + +.pe-3 { + padding-right: 1rem !important; } + +.pe-4 { + padding-right: 1.5rem !important; } + +.pe-5 { + padding-right: 3rem !important; } + +.pb-0 { + padding-bottom: 0 !important; } + +.pb-1 { + padding-bottom: 0.25rem !important; } + +.pb-2 { + padding-bottom: 0.5rem !important; } + +.pb-3 { + padding-bottom: 1rem !important; } + +.pb-4 { + padding-bottom: 1.5rem !important; } + +.pb-5 { + padding-bottom: 3rem !important; } + +.ps-0 { + padding-left: 0 !important; } + +.ps-1 { + padding-left: 0.25rem !important; } + +.ps-2 { + padding-left: 0.5rem !important; } + +.ps-3 { + padding-left: 1rem !important; } + +.ps-4 { + padding-left: 1.5rem !important; } + +.ps-5 { + padding-left: 3rem !important; } + +.gap-0 { + gap: 0 !important; } + +.gap-1 { + gap: 0.25rem !important; } + +.gap-2 { + gap: 0.5rem !important; } + +.gap-3 { + gap: 1rem !important; } + +.gap-4 { + gap: 1.5rem !important; } + +.gap-5 { + gap: 3rem !important; } + +.row-gap-0 { + row-gap: 0 !important; } + +.row-gap-1 { + row-gap: 0.25rem !important; } + +.row-gap-2 { + row-gap: 0.5rem !important; } + +.row-gap-3 { + row-gap: 1rem !important; } + +.row-gap-4 { + row-gap: 1.5rem !important; } + +.row-gap-5 { + row-gap: 3rem !important; } + +.column-gap-0 { + column-gap: 0 !important; } + +.column-gap-1 { + column-gap: 0.25rem !important; } + +.column-gap-2 { + column-gap: 0.5rem !important; } + +.column-gap-3 { + column-gap: 1rem !important; } + +.column-gap-4 { + column-gap: 1.5rem !important; } + +.column-gap-5 { + column-gap: 3rem !important; } + +.font-monospace { + font-family: var(--bs-font-monospace) !important; } + 
+.fs-1 { + font-size: calc(1.375rem + 1.5vw) !important; } + +.fs-2 { + font-size: calc(1.325rem + 0.9vw) !important; } + +.fs-3 { + font-size: calc(1.275rem + 0.3vw) !important; } + +.fs-4 { + font-size: calc(1.26rem + 0.12vw) !important; } + +.fs-5 { + font-size: 1.15rem !important; } + +.fs-6 { + font-size: 1rem !important; } + +.fst-italic { + font-style: italic !important; } + +.fst-normal { + font-style: normal !important; } + +.fw-lighter { + font-weight: lighter !important; } + +.fw-light { + font-weight: 300 !important; } + +.fw-normal { + font-weight: 400 !important; } + +.fw-medium { + font-weight: 500 !important; } + +.fw-semibold { + font-weight: 600 !important; } + +.fw-bold { + font-weight: 700 !important; } + +.fw-bolder { + font-weight: bolder !important; } + +.lh-1 { + line-height: 1 !important; } + +.lh-sm { + line-height: 1.25 !important; } + +.lh-base { + line-height: 1.5 !important; } + +.lh-lg { + line-height: 2 !important; } + +.text-start { + text-align: left !important; } + +.text-end { + text-align: right !important; } + +.text-center { + text-align: center !important; } + +.text-decoration-none { + text-decoration: none !important; } + +.text-decoration-underline { + text-decoration: underline !important; } + +.text-decoration-line-through { + text-decoration: line-through !important; } + +.text-lowercase { + text-transform: lowercase !important; } + +.text-uppercase { + text-transform: uppercase !important; } + +.text-capitalize { + text-transform: capitalize !important; } + +.text-wrap { + white-space: normal !important; } + +.text-nowrap { + white-space: nowrap !important; } + +/* rtl:begin:remove */ +.text-break { + word-wrap: break-word !important; + word-break: break-word !important; } + +/* rtl:end:remove */ +.text-primary { + --bs-text-opacity: 1; + color: rgba(var(--bs-primary-rgb), var(--bs-text-opacity)) !important; } + +.text-secondary { + --bs-text-opacity: 1; + color: rgba(var(--bs-secondary-rgb), var(--bs-text-opacity)) 
!important; } + +.text-success { + --bs-text-opacity: 1; + color: rgba(var(--bs-success-rgb), var(--bs-text-opacity)) !important; } + +.text-info { + --bs-text-opacity: 1; + color: rgba(var(--bs-info-rgb), var(--bs-text-opacity)) !important; } + +.text-warning { + --bs-text-opacity: 1; + color: rgba(var(--bs-warning-rgb), var(--bs-text-opacity)) !important; } + +.text-danger { + --bs-text-opacity: 1; + color: rgba(var(--bs-danger-rgb), var(--bs-text-opacity)) !important; } + +.text-light { + --bs-text-opacity: 1; + color: rgba(var(--bs-light-rgb), var(--bs-text-opacity)) !important; } + +.text-dark { + --bs-text-opacity: 1; + color: rgba(var(--bs-dark-rgb), var(--bs-text-opacity)) !important; } + +.text-black { + --bs-text-opacity: 1; + color: rgba(var(--bs-black-rgb), var(--bs-text-opacity)) !important; } + +.text-white { + --bs-text-opacity: 1; + color: rgba(var(--bs-white-rgb), var(--bs-text-opacity)) !important; } + +.text-body { + --bs-text-opacity: 1; + color: rgba(var(--bs-body-color-rgb), var(--bs-text-opacity)) !important; } + +.text-muted { + --bs-text-opacity: 1; + color: var(--bs-secondary-color) !important; } + +.text-black-50 { + --bs-text-opacity: 1; + color: rgba(0, 0, 0, 0.5) !important; } + +.text-white-50 { + --bs-text-opacity: 1; + color: rgba(255, 255, 255, 0.5) !important; } + +.text-body-secondary, .td-page-meta__lastmod { + --bs-text-opacity: 1; + color: var(--bs-secondary-color) !important; } + +.text-body-tertiary { + --bs-text-opacity: 1; + color: var(--bs-tertiary-color) !important; } + +.text-body-emphasis { + --bs-text-opacity: 1; + color: var(--bs-emphasis-color) !important; } + +.text-reset { + --bs-text-opacity: 1; + color: inherit !important; } + +.text-opacity-25 { + --bs-text-opacity: 0.25; } + +.text-opacity-50 { + --bs-text-opacity: 0.5; } + +.text-opacity-75 { + --bs-text-opacity: 0.75; } + +.text-opacity-100 { + --bs-text-opacity: 1; } + +.text-primary-emphasis { + color: var(--bs-primary-text-emphasis) !important; } + 
+.text-secondary-emphasis { + color: var(--bs-secondary-text-emphasis) !important; } + +.text-success-emphasis { + color: var(--bs-success-text-emphasis) !important; } + +.text-info-emphasis { + color: var(--bs-info-text-emphasis) !important; } + +.text-warning-emphasis { + color: var(--bs-warning-text-emphasis) !important; } + +.text-danger-emphasis { + color: var(--bs-danger-text-emphasis) !important; } + +.text-light-emphasis { + color: var(--bs-light-text-emphasis) !important; } + +.text-dark-emphasis { + color: var(--bs-dark-text-emphasis) !important; } + +.link-opacity-10 { + --bs-link-opacity: 0.1; } + +.link-opacity-10-hover:hover { + --bs-link-opacity: 0.1; } + +.link-opacity-25 { + --bs-link-opacity: 0.25; } + +.link-opacity-25-hover:hover { + --bs-link-opacity: 0.25; } + +.link-opacity-50 { + --bs-link-opacity: 0.5; } + +.link-opacity-50-hover:hover { + --bs-link-opacity: 0.5; } + +.link-opacity-75 { + --bs-link-opacity: 0.75; } + +.link-opacity-75-hover:hover { + --bs-link-opacity: 0.75; } + +.link-opacity-100 { + --bs-link-opacity: 1; } + +.link-opacity-100-hover:hover { + --bs-link-opacity: 1; } + +.link-offset-1 { + text-underline-offset: 0.125em !important; } + +.link-offset-1-hover:hover { + text-underline-offset: 0.125em !important; } + +.link-offset-2 { + text-underline-offset: 0.25em !important; } + +.link-offset-2-hover:hover { + text-underline-offset: 0.25em !important; } + +.link-offset-3 { + text-underline-offset: 0.375em !important; } + +.link-offset-3-hover:hover { + text-underline-offset: 0.375em !important; } + +.link-underline-primary { + --bs-link-underline-opacity: 1; + text-decoration-color: rgba(var(--bs-primary-rgb), var(--bs-link-underline-opacity)) !important; } + +.link-underline-secondary { + --bs-link-underline-opacity: 1; + text-decoration-color: rgba(var(--bs-secondary-rgb), var(--bs-link-underline-opacity)) !important; } + +.link-underline-success { + --bs-link-underline-opacity: 1; + text-decoration-color: 
rgba(var(--bs-success-rgb), var(--bs-link-underline-opacity)) !important; } + +.link-underline-info { + --bs-link-underline-opacity: 1; + text-decoration-color: rgba(var(--bs-info-rgb), var(--bs-link-underline-opacity)) !important; } + +.link-underline-warning { + --bs-link-underline-opacity: 1; + text-decoration-color: rgba(var(--bs-warning-rgb), var(--bs-link-underline-opacity)) !important; } + +.link-underline-danger { + --bs-link-underline-opacity: 1; + text-decoration-color: rgba(var(--bs-danger-rgb), var(--bs-link-underline-opacity)) !important; } + +.link-underline-light { + --bs-link-underline-opacity: 1; + text-decoration-color: rgba(var(--bs-light-rgb), var(--bs-link-underline-opacity)) !important; } + +.link-underline-dark { + --bs-link-underline-opacity: 1; + text-decoration-color: rgba(var(--bs-dark-rgb), var(--bs-link-underline-opacity)) !important; } + +.link-underline { + --bs-link-underline-opacity: 1; + text-decoration-color: rgba(var(--bs-link-color-rgb), var(--bs-link-underline-opacity, 1)) !important; } + +.link-underline-opacity-0 { + --bs-link-underline-opacity: 0; } + +.link-underline-opacity-0-hover:hover { + --bs-link-underline-opacity: 0; } + +.link-underline-opacity-10 { + --bs-link-underline-opacity: 0.1; } + +.link-underline-opacity-10-hover:hover { + --bs-link-underline-opacity: 0.1; } + +.link-underline-opacity-25 { + --bs-link-underline-opacity: 0.25; } + +.link-underline-opacity-25-hover:hover { + --bs-link-underline-opacity: 0.25; } + +.link-underline-opacity-50 { + --bs-link-underline-opacity: 0.5; } + +.link-underline-opacity-50-hover:hover { + --bs-link-underline-opacity: 0.5; } + +.link-underline-opacity-75 { + --bs-link-underline-opacity: 0.75; } + +.link-underline-opacity-75-hover:hover { + --bs-link-underline-opacity: 0.75; } + +.link-underline-opacity-100 { + --bs-link-underline-opacity: 1; } + +.link-underline-opacity-100-hover:hover { + --bs-link-underline-opacity: 1; } + +.bg-primary { + --bs-bg-opacity: 1; + 
background-color: rgba(var(--bs-primary-rgb), var(--bs-bg-opacity)) !important; } + +.bg-secondary { + --bs-bg-opacity: 1; + background-color: rgba(var(--bs-secondary-rgb), var(--bs-bg-opacity)) !important; } + +.bg-success { + --bs-bg-opacity: 1; + background-color: rgba(var(--bs-success-rgb), var(--bs-bg-opacity)) !important; } + +.bg-info { + --bs-bg-opacity: 1; + background-color: rgba(var(--bs-info-rgb), var(--bs-bg-opacity)) !important; } + +.bg-warning { + --bs-bg-opacity: 1; + background-color: rgba(var(--bs-warning-rgb), var(--bs-bg-opacity)) !important; } + +.bg-danger { + --bs-bg-opacity: 1; + background-color: rgba(var(--bs-danger-rgb), var(--bs-bg-opacity)) !important; } + +.bg-light { + --bs-bg-opacity: 1; + background-color: rgba(var(--bs-light-rgb), var(--bs-bg-opacity)) !important; } + +.bg-dark { + --bs-bg-opacity: 1; + background-color: rgba(var(--bs-dark-rgb), var(--bs-bg-opacity)) !important; } + +.bg-black { + --bs-bg-opacity: 1; + background-color: rgba(var(--bs-black-rgb), var(--bs-bg-opacity)) !important; } + +.bg-white { + --bs-bg-opacity: 1; + background-color: rgba(var(--bs-white-rgb), var(--bs-bg-opacity)) !important; } + +.bg-body { + --bs-bg-opacity: 1; + background-color: rgba(var(--bs-body-bg-rgb), var(--bs-bg-opacity)) !important; } + +.bg-transparent { + --bs-bg-opacity: 1; + background-color: transparent !important; } + +.bg-body-secondary { + --bs-bg-opacity: 1; + background-color: rgba(var(--bs-secondary-bg-rgb), var(--bs-bg-opacity)) !important; } + +.bg-body-tertiary { + --bs-bg-opacity: 1; + background-color: rgba(var(--bs-tertiary-bg-rgb), var(--bs-bg-opacity)) !important; } + +.bg-opacity-10 { + --bs-bg-opacity: 0.1; } + +.bg-opacity-25 { + --bs-bg-opacity: 0.25; } + +.bg-opacity-50 { + --bs-bg-opacity: 0.5; } + +.bg-opacity-75 { + --bs-bg-opacity: 0.75; } + +.bg-opacity-100 { + --bs-bg-opacity: 1; } + +.bg-primary-subtle { + background-color: var(--bs-primary-bg-subtle) !important; } + +.bg-secondary-subtle { + 
background-color: var(--bs-secondary-bg-subtle) !important; } + +.bg-success-subtle { + background-color: var(--bs-success-bg-subtle) !important; } + +.bg-info-subtle { + background-color: var(--bs-info-bg-subtle) !important; } + +.bg-warning-subtle { + background-color: var(--bs-warning-bg-subtle) !important; } + +.bg-danger-subtle { + background-color: var(--bs-danger-bg-subtle) !important; } + +.bg-light-subtle { + background-color: var(--bs-light-bg-subtle) !important; } + +.bg-dark-subtle { + background-color: var(--bs-dark-bg-subtle) !important; } + +.bg-gradient { + background-image: var(--bs-gradient) !important; } + +.user-select-all { + user-select: all !important; } + +.user-select-auto { + user-select: auto !important; } + +.user-select-none { + user-select: none !important; } + +.pe-none { + pointer-events: none !important; } + +.pe-auto { + pointer-events: auto !important; } + +.rounded { + border-radius: var(--bs-border-radius) !important; } + +.rounded-0 { + border-radius: 0 !important; } + +.rounded-1 { + border-radius: var(--bs-border-radius-sm) !important; } + +.rounded-2 { + border-radius: var(--bs-border-radius) !important; } + +.rounded-3 { + border-radius: var(--bs-border-radius-lg) !important; } + +.rounded-4 { + border-radius: var(--bs-border-radius-xl) !important; } + +.rounded-5 { + border-radius: var(--bs-border-radius-xxl) !important; } + +.rounded-circle { + border-radius: 50% !important; } + +.rounded-pill { + border-radius: var(--bs-border-radius-pill) !important; } + +.rounded-top { + border-top-left-radius: var(--bs-border-radius) !important; + border-top-right-radius: var(--bs-border-radius) !important; } + +.rounded-top-0 { + border-top-left-radius: 0 !important; + border-top-right-radius: 0 !important; } + +.rounded-top-1 { + border-top-left-radius: var(--bs-border-radius-sm) !important; + border-top-right-radius: var(--bs-border-radius-sm) !important; } + +.rounded-top-2 { + border-top-left-radius: var(--bs-border-radius) 
!important; + border-top-right-radius: var(--bs-border-radius) !important; } + +.rounded-top-3 { + border-top-left-radius: var(--bs-border-radius-lg) !important; + border-top-right-radius: var(--bs-border-radius-lg) !important; } + +.rounded-top-4 { + border-top-left-radius: var(--bs-border-radius-xl) !important; + border-top-right-radius: var(--bs-border-radius-xl) !important; } + +.rounded-top-5 { + border-top-left-radius: var(--bs-border-radius-xxl) !important; + border-top-right-radius: var(--bs-border-radius-xxl) !important; } + +.rounded-top-circle { + border-top-left-radius: 50% !important; + border-top-right-radius: 50% !important; } + +.rounded-top-pill { + border-top-left-radius: var(--bs-border-radius-pill) !important; + border-top-right-radius: var(--bs-border-radius-pill) !important; } + +.rounded-end { + border-top-right-radius: var(--bs-border-radius) !important; + border-bottom-right-radius: var(--bs-border-radius) !important; } + +.rounded-end-0 { + border-top-right-radius: 0 !important; + border-bottom-right-radius: 0 !important; } + +.rounded-end-1 { + border-top-right-radius: var(--bs-border-radius-sm) !important; + border-bottom-right-radius: var(--bs-border-radius-sm) !important; } + +.rounded-end-2 { + border-top-right-radius: var(--bs-border-radius) !important; + border-bottom-right-radius: var(--bs-border-radius) !important; } + +.rounded-end-3 { + border-top-right-radius: var(--bs-border-radius-lg) !important; + border-bottom-right-radius: var(--bs-border-radius-lg) !important; } + +.rounded-end-4 { + border-top-right-radius: var(--bs-border-radius-xl) !important; + border-bottom-right-radius: var(--bs-border-radius-xl) !important; } + +.rounded-end-5 { + border-top-right-radius: var(--bs-border-radius-xxl) !important; + border-bottom-right-radius: var(--bs-border-radius-xxl) !important; } + +.rounded-end-circle { + border-top-right-radius: 50% !important; + border-bottom-right-radius: 50% !important; } + +.rounded-end-pill { + 
border-top-right-radius: var(--bs-border-radius-pill) !important; + border-bottom-right-radius: var(--bs-border-radius-pill) !important; } + +.rounded-bottom { + border-bottom-right-radius: var(--bs-border-radius) !important; + border-bottom-left-radius: var(--bs-border-radius) !important; } + +.rounded-bottom-0 { + border-bottom-right-radius: 0 !important; + border-bottom-left-radius: 0 !important; } + +.rounded-bottom-1 { + border-bottom-right-radius: var(--bs-border-radius-sm) !important; + border-bottom-left-radius: var(--bs-border-radius-sm) !important; } + +.rounded-bottom-2 { + border-bottom-right-radius: var(--bs-border-radius) !important; + border-bottom-left-radius: var(--bs-border-radius) !important; } + +.rounded-bottom-3 { + border-bottom-right-radius: var(--bs-border-radius-lg) !important; + border-bottom-left-radius: var(--bs-border-radius-lg) !important; } + +.rounded-bottom-4 { + border-bottom-right-radius: var(--bs-border-radius-xl) !important; + border-bottom-left-radius: var(--bs-border-radius-xl) !important; } + +.rounded-bottom-5 { + border-bottom-right-radius: var(--bs-border-radius-xxl) !important; + border-bottom-left-radius: var(--bs-border-radius-xxl) !important; } + +.rounded-bottom-circle { + border-bottom-right-radius: 50% !important; + border-bottom-left-radius: 50% !important; } + +.rounded-bottom-pill { + border-bottom-right-radius: var(--bs-border-radius-pill) !important; + border-bottom-left-radius: var(--bs-border-radius-pill) !important; } + +.rounded-start { + border-bottom-left-radius: var(--bs-border-radius) !important; + border-top-left-radius: var(--bs-border-radius) !important; } + +.rounded-start-0 { + border-bottom-left-radius: 0 !important; + border-top-left-radius: 0 !important; } + +.rounded-start-1 { + border-bottom-left-radius: var(--bs-border-radius-sm) !important; + border-top-left-radius: var(--bs-border-radius-sm) !important; } + +.rounded-start-2 { + border-bottom-left-radius: var(--bs-border-radius) 
!important; + border-top-left-radius: var(--bs-border-radius) !important; } + +.rounded-start-3 { + border-bottom-left-radius: var(--bs-border-radius-lg) !important; + border-top-left-radius: var(--bs-border-radius-lg) !important; } + +.rounded-start-4 { + border-bottom-left-radius: var(--bs-border-radius-xl) !important; + border-top-left-radius: var(--bs-border-radius-xl) !important; } + +.rounded-start-5 { + border-bottom-left-radius: var(--bs-border-radius-xxl) !important; + border-top-left-radius: var(--bs-border-radius-xxl) !important; } + +.rounded-start-circle { + border-bottom-left-radius: 50% !important; + border-top-left-radius: 50% !important; } + +.rounded-start-pill { + border-bottom-left-radius: var(--bs-border-radius-pill) !important; + border-top-left-radius: var(--bs-border-radius-pill) !important; } + +.visible { + visibility: visible !important; } + +.invisible { + visibility: hidden !important; } + +.z-n1 { + z-index: -1 !important; } + +.z-0 { + z-index: 0 !important; } + +.z-1 { + z-index: 1 !important; } + +.z-2 { + z-index: 2 !important; } + +.z-3 { + z-index: 3 !important; } + +@media (min-width: 576px) { + .float-sm-start { + float: left !important; } + .float-sm-end { + float: right !important; } + .float-sm-none { + float: none !important; } + .object-fit-sm-contain { + object-fit: contain !important; } + .object-fit-sm-cover { + object-fit: cover !important; } + .object-fit-sm-fill { + object-fit: fill !important; } + .object-fit-sm-scale { + object-fit: scale-down !important; } + .object-fit-sm-none { + object-fit: none !important; } + .d-sm-inline { + display: inline !important; } + .d-sm-inline-block { + display: inline-block !important; } + .d-sm-block { + display: block !important; } + .d-sm-grid { + display: grid !important; } + .d-sm-inline-grid { + display: inline-grid !important; } + .d-sm-table { + display: table !important; } + .d-sm-table-row { + display: table-row !important; } + .d-sm-table-cell { + display: table-cell 
!important; } + .d-sm-flex { + display: flex !important; } + .d-sm-inline-flex { + display: inline-flex !important; } + .d-sm-none { + display: none !important; } + .flex-sm-fill { + flex: 1 1 auto !important; } + .flex-sm-row { + flex-direction: row !important; } + .flex-sm-column { + flex-direction: column !important; } + .flex-sm-row-reverse { + flex-direction: row-reverse !important; } + .flex-sm-column-reverse { + flex-direction: column-reverse !important; } + .flex-sm-grow-0 { + flex-grow: 0 !important; } + .flex-sm-grow-1 { + flex-grow: 1 !important; } + .flex-sm-shrink-0 { + flex-shrink: 0 !important; } + .flex-sm-shrink-1 { + flex-shrink: 1 !important; } + .flex-sm-wrap { + flex-wrap: wrap !important; } + .flex-sm-nowrap { + flex-wrap: nowrap !important; } + .flex-sm-wrap-reverse { + flex-wrap: wrap-reverse !important; } + .justify-content-sm-start { + justify-content: flex-start !important; } + .justify-content-sm-end { + justify-content: flex-end !important; } + .justify-content-sm-center { + justify-content: center !important; } + .justify-content-sm-between { + justify-content: space-between !important; } + .justify-content-sm-around { + justify-content: space-around !important; } + .justify-content-sm-evenly { + justify-content: space-evenly !important; } + .align-items-sm-start { + align-items: flex-start !important; } + .align-items-sm-end { + align-items: flex-end !important; } + .align-items-sm-center { + align-items: center !important; } + .align-items-sm-baseline { + align-items: baseline !important; } + .align-items-sm-stretch { + align-items: stretch !important; } + .align-content-sm-start { + align-content: flex-start !important; } + .align-content-sm-end { + align-content: flex-end !important; } + .align-content-sm-center { + align-content: center !important; } + .align-content-sm-between { + align-content: space-between !important; } + .align-content-sm-around { + align-content: space-around !important; } + .align-content-sm-stretch { + 
align-content: stretch !important; } + .align-self-sm-auto { + align-self: auto !important; } + .align-self-sm-start { + align-self: flex-start !important; } + .align-self-sm-end { + align-self: flex-end !important; } + .align-self-sm-center { + align-self: center !important; } + .align-self-sm-baseline { + align-self: baseline !important; } + .align-self-sm-stretch { + align-self: stretch !important; } + .order-sm-first { + order: -1 !important; } + .order-sm-0 { + order: 0 !important; } + .order-sm-1 { + order: 1 !important; } + .order-sm-2 { + order: 2 !important; } + .order-sm-3 { + order: 3 !important; } + .order-sm-4 { + order: 4 !important; } + .order-sm-5 { + order: 5 !important; } + .order-sm-last { + order: 6 !important; } + .m-sm-0 { + margin: 0 !important; } + .m-sm-1 { + margin: 0.25rem !important; } + .m-sm-2 { + margin: 0.5rem !important; } + .m-sm-3 { + margin: 1rem !important; } + .m-sm-4 { + margin: 1.5rem !important; } + .m-sm-5 { + margin: 3rem !important; } + .m-sm-auto { + margin: auto !important; } + .mx-sm-0 { + margin-right: 0 !important; + margin-left: 0 !important; } + .mx-sm-1 { + margin-right: 0.25rem !important; + margin-left: 0.25rem !important; } + .mx-sm-2 { + margin-right: 0.5rem !important; + margin-left: 0.5rem !important; } + .mx-sm-3 { + margin-right: 1rem !important; + margin-left: 1rem !important; } + .mx-sm-4 { + margin-right: 1.5rem !important; + margin-left: 1.5rem !important; } + .mx-sm-5 { + margin-right: 3rem !important; + margin-left: 3rem !important; } + .mx-sm-auto { + margin-right: auto !important; + margin-left: auto !important; } + .my-sm-0 { + margin-top: 0 !important; + margin-bottom: 0 !important; } + .my-sm-1 { + margin-top: 0.25rem !important; + margin-bottom: 0.25rem !important; } + .my-sm-2 { + margin-top: 0.5rem !important; + margin-bottom: 0.5rem !important; } + .my-sm-3 { + margin-top: 1rem !important; + margin-bottom: 1rem !important; } + .my-sm-4 { + margin-top: 1.5rem !important; + margin-bottom: 
1.5rem !important; } + .my-sm-5 { + margin-top: 3rem !important; + margin-bottom: 3rem !important; } + .my-sm-auto { + margin-top: auto !important; + margin-bottom: auto !important; } + .mt-sm-0 { + margin-top: 0 !important; } + .mt-sm-1 { + margin-top: 0.25rem !important; } + .mt-sm-2 { + margin-top: 0.5rem !important; } + .mt-sm-3 { + margin-top: 1rem !important; } + .mt-sm-4 { + margin-top: 1.5rem !important; } + .mt-sm-5 { + margin-top: 3rem !important; } + .mt-sm-auto { + margin-top: auto !important; } + .me-sm-0 { + margin-right: 0 !important; } + .me-sm-1 { + margin-right: 0.25rem !important; } + .me-sm-2 { + margin-right: 0.5rem !important; } + .me-sm-3 { + margin-right: 1rem !important; } + .me-sm-4 { + margin-right: 1.5rem !important; } + .me-sm-5 { + margin-right: 3rem !important; } + .me-sm-auto { + margin-right: auto !important; } + .mb-sm-0 { + margin-bottom: 0 !important; } + .mb-sm-1 { + margin-bottom: 0.25rem !important; } + .mb-sm-2 { + margin-bottom: 0.5rem !important; } + .mb-sm-3 { + margin-bottom: 1rem !important; } + .mb-sm-4 { + margin-bottom: 1.5rem !important; } + .mb-sm-5 { + margin-bottom: 3rem !important; } + .mb-sm-auto { + margin-bottom: auto !important; } + .ms-sm-0 { + margin-left: 0 !important; } + .ms-sm-1 { + margin-left: 0.25rem !important; } + .ms-sm-2 { + margin-left: 0.5rem !important; } + .ms-sm-3 { + margin-left: 1rem !important; } + .ms-sm-4 { + margin-left: 1.5rem !important; } + .ms-sm-5 { + margin-left: 3rem !important; } + .ms-sm-auto { + margin-left: auto !important; } + .p-sm-0 { + padding: 0 !important; } + .p-sm-1 { + padding: 0.25rem !important; } + .p-sm-2 { + padding: 0.5rem !important; } + .p-sm-3 { + padding: 1rem !important; } + .p-sm-4 { + padding: 1.5rem !important; } + .p-sm-5 { + padding: 3rem !important; } + .px-sm-0 { + padding-right: 0 !important; + padding-left: 0 !important; } + .px-sm-1 { + padding-right: 0.25rem !important; + padding-left: 0.25rem !important; } + .px-sm-2 { + padding-right: 0.5rem 
!important; + padding-left: 0.5rem !important; } + .px-sm-3 { + padding-right: 1rem !important; + padding-left: 1rem !important; } + .px-sm-4 { + padding-right: 1.5rem !important; + padding-left: 1.5rem !important; } + .px-sm-5 { + padding-right: 3rem !important; + padding-left: 3rem !important; } + .py-sm-0 { + padding-top: 0 !important; + padding-bottom: 0 !important; } + .py-sm-1 { + padding-top: 0.25rem !important; + padding-bottom: 0.25rem !important; } + .py-sm-2 { + padding-top: 0.5rem !important; + padding-bottom: 0.5rem !important; } + .py-sm-3 { + padding-top: 1rem !important; + padding-bottom: 1rem !important; } + .py-sm-4 { + padding-top: 1.5rem !important; + padding-bottom: 1.5rem !important; } + .py-sm-5 { + padding-top: 3rem !important; + padding-bottom: 3rem !important; } + .pt-sm-0 { + padding-top: 0 !important; } + .pt-sm-1 { + padding-top: 0.25rem !important; } + .pt-sm-2 { + padding-top: 0.5rem !important; } + .pt-sm-3 { + padding-top: 1rem !important; } + .pt-sm-4 { + padding-top: 1.5rem !important; } + .pt-sm-5 { + padding-top: 3rem !important; } + .pe-sm-0 { + padding-right: 0 !important; } + .pe-sm-1 { + padding-right: 0.25rem !important; } + .pe-sm-2 { + padding-right: 0.5rem !important; } + .pe-sm-3 { + padding-right: 1rem !important; } + .pe-sm-4 { + padding-right: 1.5rem !important; } + .pe-sm-5 { + padding-right: 3rem !important; } + .pb-sm-0 { + padding-bottom: 0 !important; } + .pb-sm-1 { + padding-bottom: 0.25rem !important; } + .pb-sm-2 { + padding-bottom: 0.5rem !important; } + .pb-sm-3 { + padding-bottom: 1rem !important; } + .pb-sm-4 { + padding-bottom: 1.5rem !important; } + .pb-sm-5 { + padding-bottom: 3rem !important; } + .ps-sm-0 { + padding-left: 0 !important; } + .ps-sm-1 { + padding-left: 0.25rem !important; } + .ps-sm-2 { + padding-left: 0.5rem !important; } + .ps-sm-3 { + padding-left: 1rem !important; } + .ps-sm-4 { + padding-left: 1.5rem !important; } + .ps-sm-5 { + padding-left: 3rem !important; } + .gap-sm-0 { + gap: 
0 !important; } + .gap-sm-1 { + gap: 0.25rem !important; } + .gap-sm-2 { + gap: 0.5rem !important; } + .gap-sm-3 { + gap: 1rem !important; } + .gap-sm-4 { + gap: 1.5rem !important; } + .gap-sm-5 { + gap: 3rem !important; } + .row-gap-sm-0 { + row-gap: 0 !important; } + .row-gap-sm-1 { + row-gap: 0.25rem !important; } + .row-gap-sm-2 { + row-gap: 0.5rem !important; } + .row-gap-sm-3 { + row-gap: 1rem !important; } + .row-gap-sm-4 { + row-gap: 1.5rem !important; } + .row-gap-sm-5 { + row-gap: 3rem !important; } + .column-gap-sm-0 { + column-gap: 0 !important; } + .column-gap-sm-1 { + column-gap: 0.25rem !important; } + .column-gap-sm-2 { + column-gap: 0.5rem !important; } + .column-gap-sm-3 { + column-gap: 1rem !important; } + .column-gap-sm-4 { + column-gap: 1.5rem !important; } + .column-gap-sm-5 { + column-gap: 3rem !important; } + .text-sm-start { + text-align: left !important; } + .text-sm-end { + text-align: right !important; } + .text-sm-center { + text-align: center !important; } } + +@media (min-width: 768px) { + .float-md-start { + float: left !important; } + .float-md-end { + float: right !important; } + .float-md-none { + float: none !important; } + .object-fit-md-contain { + object-fit: contain !important; } + .object-fit-md-cover { + object-fit: cover !important; } + .object-fit-md-fill { + object-fit: fill !important; } + .object-fit-md-scale { + object-fit: scale-down !important; } + .object-fit-md-none { + object-fit: none !important; } + .d-md-inline { + display: inline !important; } + .d-md-inline-block { + display: inline-block !important; } + .d-md-block { + display: block !important; } + .d-md-grid { + display: grid !important; } + .d-md-inline-grid { + display: inline-grid !important; } + .d-md-table { + display: table !important; } + .d-md-table-row { + display: table-row !important; } + .d-md-table-cell { + display: table-cell !important; } + .d-md-flex { + display: flex !important; } + .d-md-inline-flex { + display: inline-flex !important; } 
+ .d-md-none { + display: none !important; } + .flex-md-fill { + flex: 1 1 auto !important; } + .flex-md-row { + flex-direction: row !important; } + .flex-md-column { + flex-direction: column !important; } + .flex-md-row-reverse { + flex-direction: row-reverse !important; } + .flex-md-column-reverse { + flex-direction: column-reverse !important; } + .flex-md-grow-0 { + flex-grow: 0 !important; } + .flex-md-grow-1 { + flex-grow: 1 !important; } + .flex-md-shrink-0 { + flex-shrink: 0 !important; } + .flex-md-shrink-1 { + flex-shrink: 1 !important; } + .flex-md-wrap { + flex-wrap: wrap !important; } + .flex-md-nowrap { + flex-wrap: nowrap !important; } + .flex-md-wrap-reverse { + flex-wrap: wrap-reverse !important; } + .justify-content-md-start { + justify-content: flex-start !important; } + .justify-content-md-end { + justify-content: flex-end !important; } + .justify-content-md-center { + justify-content: center !important; } + .justify-content-md-between { + justify-content: space-between !important; } + .justify-content-md-around { + justify-content: space-around !important; } + .justify-content-md-evenly { + justify-content: space-evenly !important; } + .align-items-md-start { + align-items: flex-start !important; } + .align-items-md-end { + align-items: flex-end !important; } + .align-items-md-center { + align-items: center !important; } + .align-items-md-baseline { + align-items: baseline !important; } + .align-items-md-stretch { + align-items: stretch !important; } + .align-content-md-start { + align-content: flex-start !important; } + .align-content-md-end { + align-content: flex-end !important; } + .align-content-md-center { + align-content: center !important; } + .align-content-md-between { + align-content: space-between !important; } + .align-content-md-around { + align-content: space-around !important; } + .align-content-md-stretch { + align-content: stretch !important; } + .align-self-md-auto { + align-self: auto !important; } + .align-self-md-start { + 
align-self: flex-start !important; } + .align-self-md-end { + align-self: flex-end !important; } + .align-self-md-center { + align-self: center !important; } + .align-self-md-baseline { + align-self: baseline !important; } + .align-self-md-stretch { + align-self: stretch !important; } + .order-md-first { + order: -1 !important; } + .order-md-0 { + order: 0 !important; } + .order-md-1 { + order: 1 !important; } + .order-md-2 { + order: 2 !important; } + .order-md-3 { + order: 3 !important; } + .order-md-4 { + order: 4 !important; } + .order-md-5 { + order: 5 !important; } + .order-md-last { + order: 6 !important; } + .m-md-0 { + margin: 0 !important; } + .m-md-1 { + margin: 0.25rem !important; } + .m-md-2 { + margin: 0.5rem !important; } + .m-md-3 { + margin: 1rem !important; } + .m-md-4 { + margin: 1.5rem !important; } + .m-md-5 { + margin: 3rem !important; } + .m-md-auto { + margin: auto !important; } + .mx-md-0 { + margin-right: 0 !important; + margin-left: 0 !important; } + .mx-md-1 { + margin-right: 0.25rem !important; + margin-left: 0.25rem !important; } + .mx-md-2 { + margin-right: 0.5rem !important; + margin-left: 0.5rem !important; } + .mx-md-3 { + margin-right: 1rem !important; + margin-left: 1rem !important; } + .mx-md-4 { + margin-right: 1.5rem !important; + margin-left: 1.5rem !important; } + .mx-md-5 { + margin-right: 3rem !important; + margin-left: 3rem !important; } + .mx-md-auto { + margin-right: auto !important; + margin-left: auto !important; } + .my-md-0 { + margin-top: 0 !important; + margin-bottom: 0 !important; } + .my-md-1 { + margin-top: 0.25rem !important; + margin-bottom: 0.25rem !important; } + .my-md-2 { + margin-top: 0.5rem !important; + margin-bottom: 0.5rem !important; } + .my-md-3 { + margin-top: 1rem !important; + margin-bottom: 1rem !important; } + .my-md-4 { + margin-top: 1.5rem !important; + margin-bottom: 1.5rem !important; } + .my-md-5 { + margin-top: 3rem !important; + margin-bottom: 3rem !important; } + .my-md-auto { + 
margin-top: auto !important; + margin-bottom: auto !important; } + .mt-md-0 { + margin-top: 0 !important; } + .mt-md-1 { + margin-top: 0.25rem !important; } + .mt-md-2 { + margin-top: 0.5rem !important; } + .mt-md-3 { + margin-top: 1rem !important; } + .mt-md-4 { + margin-top: 1.5rem !important; } + .mt-md-5 { + margin-top: 3rem !important; } + .mt-md-auto { + margin-top: auto !important; } + .me-md-0 { + margin-right: 0 !important; } + .me-md-1 { + margin-right: 0.25rem !important; } + .me-md-2 { + margin-right: 0.5rem !important; } + .me-md-3 { + margin-right: 1rem !important; } + .me-md-4 { + margin-right: 1.5rem !important; } + .me-md-5 { + margin-right: 3rem !important; } + .me-md-auto { + margin-right: auto !important; } + .mb-md-0 { + margin-bottom: 0 !important; } + .mb-md-1 { + margin-bottom: 0.25rem !important; } + .mb-md-2 { + margin-bottom: 0.5rem !important; } + .mb-md-3 { + margin-bottom: 1rem !important; } + .mb-md-4 { + margin-bottom: 1.5rem !important; } + .mb-md-5 { + margin-bottom: 3rem !important; } + .mb-md-auto { + margin-bottom: auto !important; } + .ms-md-0 { + margin-left: 0 !important; } + .ms-md-1 { + margin-left: 0.25rem !important; } + .ms-md-2 { + margin-left: 0.5rem !important; } + .ms-md-3 { + margin-left: 1rem !important; } + .ms-md-4 { + margin-left: 1.5rem !important; } + .ms-md-5 { + margin-left: 3rem !important; } + .ms-md-auto { + margin-left: auto !important; } + .p-md-0 { + padding: 0 !important; } + .p-md-1 { + padding: 0.25rem !important; } + .p-md-2 { + padding: 0.5rem !important; } + .p-md-3 { + padding: 1rem !important; } + .p-md-4 { + padding: 1.5rem !important; } + .p-md-5 { + padding: 3rem !important; } + .px-md-0 { + padding-right: 0 !important; + padding-left: 0 !important; } + .px-md-1 { + padding-right: 0.25rem !important; + padding-left: 0.25rem !important; } + .px-md-2 { + padding-right: 0.5rem !important; + padding-left: 0.5rem !important; } + .px-md-3 { + padding-right: 1rem !important; + padding-left: 1rem 
!important; } + .px-md-4 { + padding-right: 1.5rem !important; + padding-left: 1.5rem !important; } + .px-md-5 { + padding-right: 3rem !important; + padding-left: 3rem !important; } + .py-md-0 { + padding-top: 0 !important; + padding-bottom: 0 !important; } + .py-md-1 { + padding-top: 0.25rem !important; + padding-bottom: 0.25rem !important; } + .py-md-2 { + padding-top: 0.5rem !important; + padding-bottom: 0.5rem !important; } + .py-md-3 { + padding-top: 1rem !important; + padding-bottom: 1rem !important; } + .py-md-4 { + padding-top: 1.5rem !important; + padding-bottom: 1.5rem !important; } + .py-md-5 { + padding-top: 3rem !important; + padding-bottom: 3rem !important; } + .pt-md-0 { + padding-top: 0 !important; } + .pt-md-1 { + padding-top: 0.25rem !important; } + .pt-md-2 { + padding-top: 0.5rem !important; } + .pt-md-3 { + padding-top: 1rem !important; } + .pt-md-4 { + padding-top: 1.5rem !important; } + .pt-md-5 { + padding-top: 3rem !important; } + .pe-md-0 { + padding-right: 0 !important; } + .pe-md-1 { + padding-right: 0.25rem !important; } + .pe-md-2 { + padding-right: 0.5rem !important; } + .pe-md-3 { + padding-right: 1rem !important; } + .pe-md-4 { + padding-right: 1.5rem !important; } + .pe-md-5 { + padding-right: 3rem !important; } + .pb-md-0 { + padding-bottom: 0 !important; } + .pb-md-1 { + padding-bottom: 0.25rem !important; } + .pb-md-2 { + padding-bottom: 0.5rem !important; } + .pb-md-3 { + padding-bottom: 1rem !important; } + .pb-md-4 { + padding-bottom: 1.5rem !important; } + .pb-md-5 { + padding-bottom: 3rem !important; } + .ps-md-0 { + padding-left: 0 !important; } + .ps-md-1 { + padding-left: 0.25rem !important; } + .ps-md-2 { + padding-left: 0.5rem !important; } + .ps-md-3 { + padding-left: 1rem !important; } + .ps-md-4 { + padding-left: 1.5rem !important; } + .ps-md-5 { + padding-left: 3rem !important; } + .gap-md-0 { + gap: 0 !important; } + .gap-md-1 { + gap: 0.25rem !important; } + .gap-md-2 { + gap: 0.5rem !important; } + .gap-md-3 { + 
gap: 1rem !important; } + .gap-md-4 { + gap: 1.5rem !important; } + .gap-md-5 { + gap: 3rem !important; } + .row-gap-md-0 { + row-gap: 0 !important; } + .row-gap-md-1 { + row-gap: 0.25rem !important; } + .row-gap-md-2 { + row-gap: 0.5rem !important; } + .row-gap-md-3 { + row-gap: 1rem !important; } + .row-gap-md-4 { + row-gap: 1.5rem !important; } + .row-gap-md-5 { + row-gap: 3rem !important; } + .column-gap-md-0 { + column-gap: 0 !important; } + .column-gap-md-1 { + column-gap: 0.25rem !important; } + .column-gap-md-2 { + column-gap: 0.5rem !important; } + .column-gap-md-3 { + column-gap: 1rem !important; } + .column-gap-md-4 { + column-gap: 1.5rem !important; } + .column-gap-md-5 { + column-gap: 3rem !important; } + .text-md-start { + text-align: left !important; } + .text-md-end { + text-align: right !important; } + .text-md-center { + text-align: center !important; } } + +@media (min-width: 992px) { + .float-lg-start { + float: left !important; } + .float-lg-end { + float: right !important; } + .float-lg-none { + float: none !important; } + .object-fit-lg-contain { + object-fit: contain !important; } + .object-fit-lg-cover { + object-fit: cover !important; } + .object-fit-lg-fill { + object-fit: fill !important; } + .object-fit-lg-scale { + object-fit: scale-down !important; } + .object-fit-lg-none { + object-fit: none !important; } + .d-lg-inline { + display: inline !important; } + .d-lg-inline-block { + display: inline-block !important; } + .d-lg-block, .td-blog .td-rss-button { + display: block !important; } + .d-lg-grid { + display: grid !important; } + .d-lg-inline-grid { + display: inline-grid !important; } + .d-lg-table { + display: table !important; } + .d-lg-table-row { + display: table-row !important; } + .d-lg-table-cell { + display: table-cell !important; } + .d-lg-flex { + display: flex !important; } + .d-lg-inline-flex { + display: inline-flex !important; } + .d-lg-none { + display: none !important; } + .flex-lg-fill { + flex: 1 1 auto !important; 
} + .flex-lg-row { + flex-direction: row !important; } + .flex-lg-column { + flex-direction: column !important; } + .flex-lg-row-reverse { + flex-direction: row-reverse !important; } + .flex-lg-column-reverse { + flex-direction: column-reverse !important; } + .flex-lg-grow-0 { + flex-grow: 0 !important; } + .flex-lg-grow-1 { + flex-grow: 1 !important; } + .flex-lg-shrink-0 { + flex-shrink: 0 !important; } + .flex-lg-shrink-1 { + flex-shrink: 1 !important; } + .flex-lg-wrap { + flex-wrap: wrap !important; } + .flex-lg-nowrap { + flex-wrap: nowrap !important; } + .flex-lg-wrap-reverse { + flex-wrap: wrap-reverse !important; } + .justify-content-lg-start { + justify-content: flex-start !important; } + .justify-content-lg-end { + justify-content: flex-end !important; } + .justify-content-lg-center { + justify-content: center !important; } + .justify-content-lg-between { + justify-content: space-between !important; } + .justify-content-lg-around { + justify-content: space-around !important; } + .justify-content-lg-evenly { + justify-content: space-evenly !important; } + .align-items-lg-start { + align-items: flex-start !important; } + .align-items-lg-end { + align-items: flex-end !important; } + .align-items-lg-center { + align-items: center !important; } + .align-items-lg-baseline { + align-items: baseline !important; } + .align-items-lg-stretch { + align-items: stretch !important; } + .align-content-lg-start { + align-content: flex-start !important; } + .align-content-lg-end { + align-content: flex-end !important; } + .align-content-lg-center { + align-content: center !important; } + .align-content-lg-between { + align-content: space-between !important; } + .align-content-lg-around { + align-content: space-around !important; } + .align-content-lg-stretch { + align-content: stretch !important; } + .align-self-lg-auto { + align-self: auto !important; } + .align-self-lg-start { + align-self: flex-start !important; } + .align-self-lg-end { + align-self: flex-end 
!important; } + .align-self-lg-center { + align-self: center !important; } + .align-self-lg-baseline { + align-self: baseline !important; } + .align-self-lg-stretch { + align-self: stretch !important; } + .order-lg-first { + order: -1 !important; } + .order-lg-0 { + order: 0 !important; } + .order-lg-1 { + order: 1 !important; } + .order-lg-2 { + order: 2 !important; } + .order-lg-3 { + order: 3 !important; } + .order-lg-4 { + order: 4 !important; } + .order-lg-5 { + order: 5 !important; } + .order-lg-last { + order: 6 !important; } + .m-lg-0 { + margin: 0 !important; } + .m-lg-1 { + margin: 0.25rem !important; } + .m-lg-2 { + margin: 0.5rem !important; } + .m-lg-3 { + margin: 1rem !important; } + .m-lg-4 { + margin: 1.5rem !important; } + .m-lg-5 { + margin: 3rem !important; } + .m-lg-auto { + margin: auto !important; } + .mx-lg-0 { + margin-right: 0 !important; + margin-left: 0 !important; } + .mx-lg-1 { + margin-right: 0.25rem !important; + margin-left: 0.25rem !important; } + .mx-lg-2 { + margin-right: 0.5rem !important; + margin-left: 0.5rem !important; } + .mx-lg-3 { + margin-right: 1rem !important; + margin-left: 1rem !important; } + .mx-lg-4 { + margin-right: 1.5rem !important; + margin-left: 1.5rem !important; } + .mx-lg-5 { + margin-right: 3rem !important; + margin-left: 3rem !important; } + .mx-lg-auto { + margin-right: auto !important; + margin-left: auto !important; } + .my-lg-0 { + margin-top: 0 !important; + margin-bottom: 0 !important; } + .my-lg-1 { + margin-top: 0.25rem !important; + margin-bottom: 0.25rem !important; } + .my-lg-2 { + margin-top: 0.5rem !important; + margin-bottom: 0.5rem !important; } + .my-lg-3 { + margin-top: 1rem !important; + margin-bottom: 1rem !important; } + .my-lg-4 { + margin-top: 1.5rem !important; + margin-bottom: 1.5rem !important; } + .my-lg-5 { + margin-top: 3rem !important; + margin-bottom: 3rem !important; } + .my-lg-auto { + margin-top: auto !important; + margin-bottom: auto !important; } + .mt-lg-0 { + 
margin-top: 0 !important; } + .mt-lg-1 { + margin-top: 0.25rem !important; } + .mt-lg-2 { + margin-top: 0.5rem !important; } + .mt-lg-3 { + margin-top: 1rem !important; } + .mt-lg-4 { + margin-top: 1.5rem !important; } + .mt-lg-5 { + margin-top: 3rem !important; } + .mt-lg-auto { + margin-top: auto !important; } + .me-lg-0 { + margin-right: 0 !important; } + .me-lg-1 { + margin-right: 0.25rem !important; } + .me-lg-2 { + margin-right: 0.5rem !important; } + .me-lg-3 { + margin-right: 1rem !important; } + .me-lg-4 { + margin-right: 1.5rem !important; } + .me-lg-5 { + margin-right: 3rem !important; } + .me-lg-auto { + margin-right: auto !important; } + .mb-lg-0 { + margin-bottom: 0 !important; } + .mb-lg-1 { + margin-bottom: 0.25rem !important; } + .mb-lg-2 { + margin-bottom: 0.5rem !important; } + .mb-lg-3 { + margin-bottom: 1rem !important; } + .mb-lg-4 { + margin-bottom: 1.5rem !important; } + .mb-lg-5 { + margin-bottom: 3rem !important; } + .mb-lg-auto { + margin-bottom: auto !important; } + .ms-lg-0 { + margin-left: 0 !important; } + .ms-lg-1 { + margin-left: 0.25rem !important; } + .ms-lg-2 { + margin-left: 0.5rem !important; } + .ms-lg-3 { + margin-left: 1rem !important; } + .ms-lg-4 { + margin-left: 1.5rem !important; } + .ms-lg-5 { + margin-left: 3rem !important; } + .ms-lg-auto { + margin-left: auto !important; } + .p-lg-0 { + padding: 0 !important; } + .p-lg-1 { + padding: 0.25rem !important; } + .p-lg-2 { + padding: 0.5rem !important; } + .p-lg-3 { + padding: 1rem !important; } + .p-lg-4 { + padding: 1.5rem !important; } + .p-lg-5 { + padding: 3rem !important; } + .px-lg-0 { + padding-right: 0 !important; + padding-left: 0 !important; } + .px-lg-1 { + padding-right: 0.25rem !important; + padding-left: 0.25rem !important; } + .px-lg-2 { + padding-right: 0.5rem !important; + padding-left: 0.5rem !important; } + .px-lg-3 { + padding-right: 1rem !important; + padding-left: 1rem !important; } + .px-lg-4 { + padding-right: 1.5rem !important; + padding-left: 
1.5rem !important; } + .px-lg-5 { + padding-right: 3rem !important; + padding-left: 3rem !important; } + .py-lg-0 { + padding-top: 0 !important; + padding-bottom: 0 !important; } + .py-lg-1 { + padding-top: 0.25rem !important; + padding-bottom: 0.25rem !important; } + .py-lg-2 { + padding-top: 0.5rem !important; + padding-bottom: 0.5rem !important; } + .py-lg-3 { + padding-top: 1rem !important; + padding-bottom: 1rem !important; } + .py-lg-4 { + padding-top: 1.5rem !important; + padding-bottom: 1.5rem !important; } + .py-lg-5 { + padding-top: 3rem !important; + padding-bottom: 3rem !important; } + .pt-lg-0 { + padding-top: 0 !important; } + .pt-lg-1 { + padding-top: 0.25rem !important; } + .pt-lg-2 { + padding-top: 0.5rem !important; } + .pt-lg-3 { + padding-top: 1rem !important; } + .pt-lg-4 { + padding-top: 1.5rem !important; } + .pt-lg-5 { + padding-top: 3rem !important; } + .pe-lg-0 { + padding-right: 0 !important; } + .pe-lg-1 { + padding-right: 0.25rem !important; } + .pe-lg-2 { + padding-right: 0.5rem !important; } + .pe-lg-3 { + padding-right: 1rem !important; } + .pe-lg-4 { + padding-right: 1.5rem !important; } + .pe-lg-5 { + padding-right: 3rem !important; } + .pb-lg-0 { + padding-bottom: 0 !important; } + .pb-lg-1 { + padding-bottom: 0.25rem !important; } + .pb-lg-2 { + padding-bottom: 0.5rem !important; } + .pb-lg-3 { + padding-bottom: 1rem !important; } + .pb-lg-4 { + padding-bottom: 1.5rem !important; } + .pb-lg-5 { + padding-bottom: 3rem !important; } + .ps-lg-0 { + padding-left: 0 !important; } + .ps-lg-1 { + padding-left: 0.25rem !important; } + .ps-lg-2 { + padding-left: 0.5rem !important; } + .ps-lg-3 { + padding-left: 1rem !important; } + .ps-lg-4 { + padding-left: 1.5rem !important; } + .ps-lg-5 { + padding-left: 3rem !important; } + .gap-lg-0 { + gap: 0 !important; } + .gap-lg-1 { + gap: 0.25rem !important; } + .gap-lg-2 { + gap: 0.5rem !important; } + .gap-lg-3 { + gap: 1rem !important; } + .gap-lg-4 { + gap: 1.5rem !important; } + .gap-lg-5 
{ + gap: 3rem !important; } + .row-gap-lg-0 { + row-gap: 0 !important; } + .row-gap-lg-1 { + row-gap: 0.25rem !important; } + .row-gap-lg-2 { + row-gap: 0.5rem !important; } + .row-gap-lg-3 { + row-gap: 1rem !important; } + .row-gap-lg-4 { + row-gap: 1.5rem !important; } + .row-gap-lg-5 { + row-gap: 3rem !important; } + .column-gap-lg-0 { + column-gap: 0 !important; } + .column-gap-lg-1 { + column-gap: 0.25rem !important; } + .column-gap-lg-2 { + column-gap: 0.5rem !important; } + .column-gap-lg-3 { + column-gap: 1rem !important; } + .column-gap-lg-4 { + column-gap: 1.5rem !important; } + .column-gap-lg-5 { + column-gap: 3rem !important; } + .text-lg-start { + text-align: left !important; } + .text-lg-end { + text-align: right !important; } + .text-lg-center { + text-align: center !important; } } + +@media (min-width: 1200px) { + .float-xl-start { + float: left !important; } + .float-xl-end { + float: right !important; } + .float-xl-none { + float: none !important; } + .object-fit-xl-contain { + object-fit: contain !important; } + .object-fit-xl-cover { + object-fit: cover !important; } + .object-fit-xl-fill { + object-fit: fill !important; } + .object-fit-xl-scale { + object-fit: scale-down !important; } + .object-fit-xl-none { + object-fit: none !important; } + .d-xl-inline { + display: inline !important; } + .d-xl-inline-block { + display: inline-block !important; } + .d-xl-block { + display: block !important; } + .d-xl-grid { + display: grid !important; } + .d-xl-inline-grid { + display: inline-grid !important; } + .d-xl-table { + display: table !important; } + .d-xl-table-row { + display: table-row !important; } + .d-xl-table-cell { + display: table-cell !important; } + .d-xl-flex { + display: flex !important; } + .d-xl-inline-flex { + display: inline-flex !important; } + .d-xl-none { + display: none !important; } + .flex-xl-fill { + flex: 1 1 auto !important; } + .flex-xl-row { + flex-direction: row !important; } + .flex-xl-column { + flex-direction: column 
!important; } + .flex-xl-row-reverse { + flex-direction: row-reverse !important; } + .flex-xl-column-reverse { + flex-direction: column-reverse !important; } + .flex-xl-grow-0 { + flex-grow: 0 !important; } + .flex-xl-grow-1 { + flex-grow: 1 !important; } + .flex-xl-shrink-0 { + flex-shrink: 0 !important; } + .flex-xl-shrink-1 { + flex-shrink: 1 !important; } + .flex-xl-wrap { + flex-wrap: wrap !important; } + .flex-xl-nowrap { + flex-wrap: nowrap !important; } + .flex-xl-wrap-reverse { + flex-wrap: wrap-reverse !important; } + .justify-content-xl-start { + justify-content: flex-start !important; } + .justify-content-xl-end { + justify-content: flex-end !important; } + .justify-content-xl-center { + justify-content: center !important; } + .justify-content-xl-between { + justify-content: space-between !important; } + .justify-content-xl-around { + justify-content: space-around !important; } + .justify-content-xl-evenly { + justify-content: space-evenly !important; } + .align-items-xl-start { + align-items: flex-start !important; } + .align-items-xl-end { + align-items: flex-end !important; } + .align-items-xl-center { + align-items: center !important; } + .align-items-xl-baseline { + align-items: baseline !important; } + .align-items-xl-stretch { + align-items: stretch !important; } + .align-content-xl-start { + align-content: flex-start !important; } + .align-content-xl-end { + align-content: flex-end !important; } + .align-content-xl-center { + align-content: center !important; } + .align-content-xl-between { + align-content: space-between !important; } + .align-content-xl-around { + align-content: space-around !important; } + .align-content-xl-stretch { + align-content: stretch !important; } + .align-self-xl-auto { + align-self: auto !important; } + .align-self-xl-start { + align-self: flex-start !important; } + .align-self-xl-end { + align-self: flex-end !important; } + .align-self-xl-center { + align-self: center !important; } + .align-self-xl-baseline { + 
align-self: baseline !important; } + .align-self-xl-stretch { + align-self: stretch !important; } + .order-xl-first { + order: -1 !important; } + .order-xl-0 { + order: 0 !important; } + .order-xl-1 { + order: 1 !important; } + .order-xl-2 { + order: 2 !important; } + .order-xl-3 { + order: 3 !important; } + .order-xl-4 { + order: 4 !important; } + .order-xl-5 { + order: 5 !important; } + .order-xl-last { + order: 6 !important; } + .m-xl-0 { + margin: 0 !important; } + .m-xl-1 { + margin: 0.25rem !important; } + .m-xl-2 { + margin: 0.5rem !important; } + .m-xl-3 { + margin: 1rem !important; } + .m-xl-4 { + margin: 1.5rem !important; } + .m-xl-5 { + margin: 3rem !important; } + .m-xl-auto { + margin: auto !important; } + .mx-xl-0 { + margin-right: 0 !important; + margin-left: 0 !important; } + .mx-xl-1 { + margin-right: 0.25rem !important; + margin-left: 0.25rem !important; } + .mx-xl-2 { + margin-right: 0.5rem !important; + margin-left: 0.5rem !important; } + .mx-xl-3 { + margin-right: 1rem !important; + margin-left: 1rem !important; } + .mx-xl-4 { + margin-right: 1.5rem !important; + margin-left: 1.5rem !important; } + .mx-xl-5 { + margin-right: 3rem !important; + margin-left: 3rem !important; } + .mx-xl-auto { + margin-right: auto !important; + margin-left: auto !important; } + .my-xl-0 { + margin-top: 0 !important; + margin-bottom: 0 !important; } + .my-xl-1 { + margin-top: 0.25rem !important; + margin-bottom: 0.25rem !important; } + .my-xl-2 { + margin-top: 0.5rem !important; + margin-bottom: 0.5rem !important; } + .my-xl-3 { + margin-top: 1rem !important; + margin-bottom: 1rem !important; } + .my-xl-4 { + margin-top: 1.5rem !important; + margin-bottom: 1.5rem !important; } + .my-xl-5 { + margin-top: 3rem !important; + margin-bottom: 3rem !important; } + .my-xl-auto { + margin-top: auto !important; + margin-bottom: auto !important; } + .mt-xl-0 { + margin-top: 0 !important; } + .mt-xl-1 { + margin-top: 0.25rem !important; } + .mt-xl-2 { + margin-top: 0.5rem 
!important; } + .mt-xl-3 { + margin-top: 1rem !important; } + .mt-xl-4 { + margin-top: 1.5rem !important; } + .mt-xl-5 { + margin-top: 3rem !important; } + .mt-xl-auto { + margin-top: auto !important; } + .me-xl-0 { + margin-right: 0 !important; } + .me-xl-1 { + margin-right: 0.25rem !important; } + .me-xl-2 { + margin-right: 0.5rem !important; } + .me-xl-3 { + margin-right: 1rem !important; } + .me-xl-4 { + margin-right: 1.5rem !important; } + .me-xl-5 { + margin-right: 3rem !important; } + .me-xl-auto { + margin-right: auto !important; } + .mb-xl-0 { + margin-bottom: 0 !important; } + .mb-xl-1 { + margin-bottom: 0.25rem !important; } + .mb-xl-2 { + margin-bottom: 0.5rem !important; } + .mb-xl-3 { + margin-bottom: 1rem !important; } + .mb-xl-4 { + margin-bottom: 1.5rem !important; } + .mb-xl-5 { + margin-bottom: 3rem !important; } + .mb-xl-auto { + margin-bottom: auto !important; } + .ms-xl-0 { + margin-left: 0 !important; } + .ms-xl-1 { + margin-left: 0.25rem !important; } + .ms-xl-2 { + margin-left: 0.5rem !important; } + .ms-xl-3 { + margin-left: 1rem !important; } + .ms-xl-4 { + margin-left: 1.5rem !important; } + .ms-xl-5 { + margin-left: 3rem !important; } + .ms-xl-auto { + margin-left: auto !important; } + .p-xl-0 { + padding: 0 !important; } + .p-xl-1 { + padding: 0.25rem !important; } + .p-xl-2 { + padding: 0.5rem !important; } + .p-xl-3 { + padding: 1rem !important; } + .p-xl-4 { + padding: 1.5rem !important; } + .p-xl-5 { + padding: 3rem !important; } + .px-xl-0 { + padding-right: 0 !important; + padding-left: 0 !important; } + .px-xl-1 { + padding-right: 0.25rem !important; + padding-left: 0.25rem !important; } + .px-xl-2 { + padding-right: 0.5rem !important; + padding-left: 0.5rem !important; } + .px-xl-3 { + padding-right: 1rem !important; + padding-left: 1rem !important; } + .px-xl-4 { + padding-right: 1.5rem !important; + padding-left: 1.5rem !important; } + .px-xl-5 { + padding-right: 3rem !important; + padding-left: 3rem !important; } + .py-xl-0 
{ + padding-top: 0 !important; + padding-bottom: 0 !important; } + .py-xl-1 { + padding-top: 0.25rem !important; + padding-bottom: 0.25rem !important; } + .py-xl-2 { + padding-top: 0.5rem !important; + padding-bottom: 0.5rem !important; } + .py-xl-3 { + padding-top: 1rem !important; + padding-bottom: 1rem !important; } + .py-xl-4 { + padding-top: 1.5rem !important; + padding-bottom: 1.5rem !important; } + .py-xl-5 { + padding-top: 3rem !important; + padding-bottom: 3rem !important; } + .pt-xl-0 { + padding-top: 0 !important; } + .pt-xl-1 { + padding-top: 0.25rem !important; } + .pt-xl-2 { + padding-top: 0.5rem !important; } + .pt-xl-3 { + padding-top: 1rem !important; } + .pt-xl-4 { + padding-top: 1.5rem !important; } + .pt-xl-5 { + padding-top: 3rem !important; } + .pe-xl-0 { + padding-right: 0 !important; } + .pe-xl-1 { + padding-right: 0.25rem !important; } + .pe-xl-2 { + padding-right: 0.5rem !important; } + .pe-xl-3 { + padding-right: 1rem !important; } + .pe-xl-4 { + padding-right: 1.5rem !important; } + .pe-xl-5 { + padding-right: 3rem !important; } + .pb-xl-0 { + padding-bottom: 0 !important; } + .pb-xl-1 { + padding-bottom: 0.25rem !important; } + .pb-xl-2 { + padding-bottom: 0.5rem !important; } + .pb-xl-3 { + padding-bottom: 1rem !important; } + .pb-xl-4 { + padding-bottom: 1.5rem !important; } + .pb-xl-5 { + padding-bottom: 3rem !important; } + .ps-xl-0 { + padding-left: 0 !important; } + .ps-xl-1 { + padding-left: 0.25rem !important; } + .ps-xl-2 { + padding-left: 0.5rem !important; } + .ps-xl-3 { + padding-left: 1rem !important; } + .ps-xl-4 { + padding-left: 1.5rem !important; } + .ps-xl-5 { + padding-left: 3rem !important; } + .gap-xl-0 { + gap: 0 !important; } + .gap-xl-1 { + gap: 0.25rem !important; } + .gap-xl-2 { + gap: 0.5rem !important; } + .gap-xl-3 { + gap: 1rem !important; } + .gap-xl-4 { + gap: 1.5rem !important; } + .gap-xl-5 { + gap: 3rem !important; } + .row-gap-xl-0 { + row-gap: 0 !important; } + .row-gap-xl-1 { + row-gap: 0.25rem 
!important; } + .row-gap-xl-2 { + row-gap: 0.5rem !important; } + .row-gap-xl-3 { + row-gap: 1rem !important; } + .row-gap-xl-4 { + row-gap: 1.5rem !important; } + .row-gap-xl-5 { + row-gap: 3rem !important; } + .column-gap-xl-0 { + column-gap: 0 !important; } + .column-gap-xl-1 { + column-gap: 0.25rem !important; } + .column-gap-xl-2 { + column-gap: 0.5rem !important; } + .column-gap-xl-3 { + column-gap: 1rem !important; } + .column-gap-xl-4 { + column-gap: 1.5rem !important; } + .column-gap-xl-5 { + column-gap: 3rem !important; } + .text-xl-start { + text-align: left !important; } + .text-xl-end { + text-align: right !important; } + .text-xl-center { + text-align: center !important; } } + +@media (min-width: 1400px) { + .float-xxl-start { + float: left !important; } + .float-xxl-end { + float: right !important; } + .float-xxl-none { + float: none !important; } + .object-fit-xxl-contain { + object-fit: contain !important; } + .object-fit-xxl-cover { + object-fit: cover !important; } + .object-fit-xxl-fill { + object-fit: fill !important; } + .object-fit-xxl-scale { + object-fit: scale-down !important; } + .object-fit-xxl-none { + object-fit: none !important; } + .d-xxl-inline { + display: inline !important; } + .d-xxl-inline-block { + display: inline-block !important; } + .d-xxl-block { + display: block !important; } + .d-xxl-grid { + display: grid !important; } + .d-xxl-inline-grid { + display: inline-grid !important; } + .d-xxl-table { + display: table !important; } + .d-xxl-table-row { + display: table-row !important; } + .d-xxl-table-cell { + display: table-cell !important; } + .d-xxl-flex { + display: flex !important; } + .d-xxl-inline-flex { + display: inline-flex !important; } + .d-xxl-none { + display: none !important; } + .flex-xxl-fill { + flex: 1 1 auto !important; } + .flex-xxl-row { + flex-direction: row !important; } + .flex-xxl-column { + flex-direction: column !important; } + .flex-xxl-row-reverse { + flex-direction: row-reverse !important; } + 
.flex-xxl-column-reverse { + flex-direction: column-reverse !important; } + .flex-xxl-grow-0 { + flex-grow: 0 !important; } + .flex-xxl-grow-1 { + flex-grow: 1 !important; } + .flex-xxl-shrink-0 { + flex-shrink: 0 !important; } + .flex-xxl-shrink-1 { + flex-shrink: 1 !important; } + .flex-xxl-wrap { + flex-wrap: wrap !important; } + .flex-xxl-nowrap { + flex-wrap: nowrap !important; } + .flex-xxl-wrap-reverse { + flex-wrap: wrap-reverse !important; } + .justify-content-xxl-start { + justify-content: flex-start !important; } + .justify-content-xxl-end { + justify-content: flex-end !important; } + .justify-content-xxl-center { + justify-content: center !important; } + .justify-content-xxl-between { + justify-content: space-between !important; } + .justify-content-xxl-around { + justify-content: space-around !important; } + .justify-content-xxl-evenly { + justify-content: space-evenly !important; } + .align-items-xxl-start { + align-items: flex-start !important; } + .align-items-xxl-end { + align-items: flex-end !important; } + .align-items-xxl-center { + align-items: center !important; } + .align-items-xxl-baseline { + align-items: baseline !important; } + .align-items-xxl-stretch { + align-items: stretch !important; } + .align-content-xxl-start { + align-content: flex-start !important; } + .align-content-xxl-end { + align-content: flex-end !important; } + .align-content-xxl-center { + align-content: center !important; } + .align-content-xxl-between { + align-content: space-between !important; } + .align-content-xxl-around { + align-content: space-around !important; } + .align-content-xxl-stretch { + align-content: stretch !important; } + .align-self-xxl-auto { + align-self: auto !important; } + .align-self-xxl-start { + align-self: flex-start !important; } + .align-self-xxl-end { + align-self: flex-end !important; } + .align-self-xxl-center { + align-self: center !important; } + .align-self-xxl-baseline { + align-self: baseline !important; } + 
.align-self-xxl-stretch { + align-self: stretch !important; } + .order-xxl-first { + order: -1 !important; } + .order-xxl-0 { + order: 0 !important; } + .order-xxl-1 { + order: 1 !important; } + .order-xxl-2 { + order: 2 !important; } + .order-xxl-3 { + order: 3 !important; } + .order-xxl-4 { + order: 4 !important; } + .order-xxl-5 { + order: 5 !important; } + .order-xxl-last { + order: 6 !important; } + .m-xxl-0 { + margin: 0 !important; } + .m-xxl-1 { + margin: 0.25rem !important; } + .m-xxl-2 { + margin: 0.5rem !important; } + .m-xxl-3 { + margin: 1rem !important; } + .m-xxl-4 { + margin: 1.5rem !important; } + .m-xxl-5 { + margin: 3rem !important; } + .m-xxl-auto { + margin: auto !important; } + .mx-xxl-0 { + margin-right: 0 !important; + margin-left: 0 !important; } + .mx-xxl-1 { + margin-right: 0.25rem !important; + margin-left: 0.25rem !important; } + .mx-xxl-2 { + margin-right: 0.5rem !important; + margin-left: 0.5rem !important; } + .mx-xxl-3 { + margin-right: 1rem !important; + margin-left: 1rem !important; } + .mx-xxl-4 { + margin-right: 1.5rem !important; + margin-left: 1.5rem !important; } + .mx-xxl-5 { + margin-right: 3rem !important; + margin-left: 3rem !important; } + .mx-xxl-auto { + margin-right: auto !important; + margin-left: auto !important; } + .my-xxl-0 { + margin-top: 0 !important; + margin-bottom: 0 !important; } + .my-xxl-1 { + margin-top: 0.25rem !important; + margin-bottom: 0.25rem !important; } + .my-xxl-2 { + margin-top: 0.5rem !important; + margin-bottom: 0.5rem !important; } + .my-xxl-3 { + margin-top: 1rem !important; + margin-bottom: 1rem !important; } + .my-xxl-4 { + margin-top: 1.5rem !important; + margin-bottom: 1.5rem !important; } + .my-xxl-5 { + margin-top: 3rem !important; + margin-bottom: 3rem !important; } + .my-xxl-auto { + margin-top: auto !important; + margin-bottom: auto !important; } + .mt-xxl-0 { + margin-top: 0 !important; } + .mt-xxl-1 { + margin-top: 0.25rem !important; } + .mt-xxl-2 { + margin-top: 0.5rem 
!important; } + .mt-xxl-3 { + margin-top: 1rem !important; } + .mt-xxl-4 { + margin-top: 1.5rem !important; } + .mt-xxl-5 { + margin-top: 3rem !important; } + .mt-xxl-auto { + margin-top: auto !important; } + .me-xxl-0 { + margin-right: 0 !important; } + .me-xxl-1 { + margin-right: 0.25rem !important; } + .me-xxl-2 { + margin-right: 0.5rem !important; } + .me-xxl-3 { + margin-right: 1rem !important; } + .me-xxl-4 { + margin-right: 1.5rem !important; } + .me-xxl-5 { + margin-right: 3rem !important; } + .me-xxl-auto { + margin-right: auto !important; } + .mb-xxl-0 { + margin-bottom: 0 !important; } + .mb-xxl-1 { + margin-bottom: 0.25rem !important; } + .mb-xxl-2 { + margin-bottom: 0.5rem !important; } + .mb-xxl-3 { + margin-bottom: 1rem !important; } + .mb-xxl-4 { + margin-bottom: 1.5rem !important; } + .mb-xxl-5 { + margin-bottom: 3rem !important; } + .mb-xxl-auto { + margin-bottom: auto !important; } + .ms-xxl-0 { + margin-left: 0 !important; } + .ms-xxl-1 { + margin-left: 0.25rem !important; } + .ms-xxl-2 { + margin-left: 0.5rem !important; } + .ms-xxl-3 { + margin-left: 1rem !important; } + .ms-xxl-4 { + margin-left: 1.5rem !important; } + .ms-xxl-5 { + margin-left: 3rem !important; } + .ms-xxl-auto { + margin-left: auto !important; } + .p-xxl-0 { + padding: 0 !important; } + .p-xxl-1 { + padding: 0.25rem !important; } + .p-xxl-2 { + padding: 0.5rem !important; } + .p-xxl-3 { + padding: 1rem !important; } + .p-xxl-4 { + padding: 1.5rem !important; } + .p-xxl-5 { + padding: 3rem !important; } + .px-xxl-0 { + padding-right: 0 !important; + padding-left: 0 !important; } + .px-xxl-1 { + padding-right: 0.25rem !important; + padding-left: 0.25rem !important; } + .px-xxl-2 { + padding-right: 0.5rem !important; + padding-left: 0.5rem !important; } + .px-xxl-3 { + padding-right: 1rem !important; + padding-left: 1rem !important; } + .px-xxl-4 { + padding-right: 1.5rem !important; + padding-left: 1.5rem !important; } + .px-xxl-5 { + padding-right: 3rem !important; + 
padding-left: 3rem !important; } + .py-xxl-0 { + padding-top: 0 !important; + padding-bottom: 0 !important; } + .py-xxl-1 { + padding-top: 0.25rem !important; + padding-bottom: 0.25rem !important; } + .py-xxl-2 { + padding-top: 0.5rem !important; + padding-bottom: 0.5rem !important; } + .py-xxl-3 { + padding-top: 1rem !important; + padding-bottom: 1rem !important; } + .py-xxl-4 { + padding-top: 1.5rem !important; + padding-bottom: 1.5rem !important; } + .py-xxl-5 { + padding-top: 3rem !important; + padding-bottom: 3rem !important; } + .pt-xxl-0 { + padding-top: 0 !important; } + .pt-xxl-1 { + padding-top: 0.25rem !important; } + .pt-xxl-2 { + padding-top: 0.5rem !important; } + .pt-xxl-3 { + padding-top: 1rem !important; } + .pt-xxl-4 { + padding-top: 1.5rem !important; } + .pt-xxl-5 { + padding-top: 3rem !important; } + .pe-xxl-0 { + padding-right: 0 !important; } + .pe-xxl-1 { + padding-right: 0.25rem !important; } + .pe-xxl-2 { + padding-right: 0.5rem !important; } + .pe-xxl-3 { + padding-right: 1rem !important; } + .pe-xxl-4 { + padding-right: 1.5rem !important; } + .pe-xxl-5 { + padding-right: 3rem !important; } + .pb-xxl-0 { + padding-bottom: 0 !important; } + .pb-xxl-1 { + padding-bottom: 0.25rem !important; } + .pb-xxl-2 { + padding-bottom: 0.5rem !important; } + .pb-xxl-3 { + padding-bottom: 1rem !important; } + .pb-xxl-4 { + padding-bottom: 1.5rem !important; } + .pb-xxl-5 { + padding-bottom: 3rem !important; } + .ps-xxl-0 { + padding-left: 0 !important; } + .ps-xxl-1 { + padding-left: 0.25rem !important; } + .ps-xxl-2 { + padding-left: 0.5rem !important; } + .ps-xxl-3 { + padding-left: 1rem !important; } + .ps-xxl-4 { + padding-left: 1.5rem !important; } + .ps-xxl-5 { + padding-left: 3rem !important; } + .gap-xxl-0 { + gap: 0 !important; } + .gap-xxl-1 { + gap: 0.25rem !important; } + .gap-xxl-2 { + gap: 0.5rem !important; } + .gap-xxl-3 { + gap: 1rem !important; } + .gap-xxl-4 { + gap: 1.5rem !important; } + .gap-xxl-5 { + gap: 3rem !important; } + 
.row-gap-xxl-0 { + row-gap: 0 !important; } + .row-gap-xxl-1 { + row-gap: 0.25rem !important; } + .row-gap-xxl-2 { + row-gap: 0.5rem !important; } + .row-gap-xxl-3 { + row-gap: 1rem !important; } + .row-gap-xxl-4 { + row-gap: 1.5rem !important; } + .row-gap-xxl-5 { + row-gap: 3rem !important; } + .column-gap-xxl-0 { + column-gap: 0 !important; } + .column-gap-xxl-1 { + column-gap: 0.25rem !important; } + .column-gap-xxl-2 { + column-gap: 0.5rem !important; } + .column-gap-xxl-3 { + column-gap: 1rem !important; } + .column-gap-xxl-4 { + column-gap: 1.5rem !important; } + .column-gap-xxl-5 { + column-gap: 3rem !important; } + .text-xxl-start { + text-align: left !important; } + .text-xxl-end { + text-align: right !important; } + .text-xxl-center { + text-align: center !important; } } + +@media (min-width: 1200px) { + .fs-1 { + font-size: 2.5rem !important; } + .fs-2 { + font-size: 2rem !important; } + .fs-3 { + font-size: 1.5rem !important; } + .fs-4 { + font-size: 1.35rem !important; } } + +@media print { + .d-print-inline { + display: inline !important; } + .d-print-inline-block { + display: inline-block !important; } + .d-print-block { + display: block !important; } + .d-print-grid { + display: grid !important; } + .d-print-inline-grid { + display: inline-grid !important; } + .d-print-table { + display: table !important; } + .d-print-table-row { + display: table-row !important; } + .d-print-table-cell { + display: table-cell !important; } + .d-print-flex { + display: flex !important; } + .d-print-inline-flex { + display: inline-flex !important; } + .d-print-none { + display: none !important; } } + +/*! + * Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com + * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) + * Copyright 2024 Fonticons, Inc. 
+ */ +.fa, .td-search__icon:before { + font-family: var(--fa-style-family, "Font Awesome 6 Free"); + font-weight: var(--fa-style, 900); } + +.fa, .td-search__icon:before, +.fa-classic, +.fa-sharp, +.fas, +.td-offline-search-results__close-button:after, +.fa-solid, +.far, +.fa-regular, +.fab, +.fa-brands { + -moz-osx-font-smoothing: grayscale; + -webkit-font-smoothing: antialiased; + display: var(--fa-display, inline-block); + font-style: normal; + font-variant: normal; + line-height: 1; + text-rendering: auto; } + +.fas, .td-offline-search-results__close-button:after, +.fa-classic, +.fa-solid, +.far, +.fa-regular { + font-family: 'Font Awesome 6 Free'; } + +.fab, +.fa-brands { + font-family: 'Font Awesome 6 Brands'; } + +.fa-1x { + font-size: 1em; } + +.fa-2x { + font-size: 2em; } + +.fa-3x { + font-size: 3em; } + +.fa-4x { + font-size: 4em; } + +.fa-5x { + font-size: 5em; } + +.fa-6x { + font-size: 6em; } + +.fa-7x { + font-size: 7em; } + +.fa-8x { + font-size: 8em; } + +.fa-9x { + font-size: 9em; } + +.fa-10x { + font-size: 10em; } + +.fa-2xs { + font-size: 0.625em; + line-height: 0.1em; + vertical-align: 0.225em; } + +.fa-xs { + font-size: 0.75em; + line-height: 0.08333333em; + vertical-align: 0.125em; } + +.fa-sm { + font-size: 0.875em; + line-height: 0.07142857em; + vertical-align: 0.05357143em; } + +.fa-lg { + font-size: 1.25em; + line-height: 0.05em; + vertical-align: -0.075em; } + +.fa-xl { + font-size: 1.5em; + line-height: 0.04166667em; + vertical-align: -0.125em; } + +.fa-2xl { + font-size: 2em; + line-height: 0.03125em; + vertical-align: -0.1875em; } + +.fa-fw { + text-align: center; + width: 1.25em; } + +.fa-ul { + list-style-type: none; + margin-left: var(--fa-li-margin, 2.5em); + padding-left: 0; } + .fa-ul > li { + position: relative; } + +.fa-li { + left: calc(var(--fa-li-width, 2em) * -1); + position: absolute; + text-align: center; + width: var(--fa-li-width, 2em); + line-height: inherit; } + +.fa-border { + border-color: var(--fa-border-color, 
#eee); + border-radius: var(--fa-border-radius, 0.1em); + border-style: var(--fa-border-style, solid); + border-width: var(--fa-border-width, 0.08em); + padding: var(--fa-border-padding, 0.2em 0.25em 0.15em); } + +.fa-pull-left { + float: left; + margin-right: var(--fa-pull-margin, 0.3em); } + +.fa-pull-right { + float: right; + margin-left: var(--fa-pull-margin, 0.3em); } + +.fa-beat { + animation-name: fa-beat; + animation-delay: var(--fa-animation-delay, 0s); + animation-direction: var(--fa-animation-direction, normal); + animation-duration: var(--fa-animation-duration, 1s); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-timing-function: var(--fa-animation-timing, ease-in-out); } + +.fa-bounce { + animation-name: fa-bounce; + animation-delay: var(--fa-animation-delay, 0s); + animation-direction: var(--fa-animation-direction, normal); + animation-duration: var(--fa-animation-duration, 1s); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.28, 0.84, 0.42, 1)); } + +.fa-fade { + animation-name: fa-fade; + animation-delay: var(--fa-animation-delay, 0s); + animation-direction: var(--fa-animation-direction, normal); + animation-duration: var(--fa-animation-duration, 1s); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1)); } + +.fa-beat-fade { + animation-name: fa-beat-fade; + animation-delay: var(--fa-animation-delay, 0s); + animation-direction: var(--fa-animation-direction, normal); + animation-duration: var(--fa-animation-duration, 1s); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1)); } + +.fa-flip { + animation-name: fa-flip; + animation-delay: var(--fa-animation-delay, 0s); + animation-direction: 
var(--fa-animation-direction, normal); + animation-duration: var(--fa-animation-duration, 1s); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-timing-function: var(--fa-animation-timing, ease-in-out); } + +.fa-shake { + animation-name: fa-shake; + animation-delay: var(--fa-animation-delay, 0s); + animation-direction: var(--fa-animation-direction, normal); + animation-duration: var(--fa-animation-duration, 1s); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-timing-function: var(--fa-animation-timing, linear); } + +.fa-spin { + animation-name: fa-spin; + animation-delay: var(--fa-animation-delay, 0s); + animation-direction: var(--fa-animation-direction, normal); + animation-duration: var(--fa-animation-duration, 2s); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-timing-function: var(--fa-animation-timing, linear); } + +.fa-spin-reverse { + --fa-animation-direction: reverse; } + +.fa-pulse, +.fa-spin-pulse { + animation-name: fa-spin; + animation-direction: var(--fa-animation-direction, normal); + animation-duration: var(--fa-animation-duration, 1s); + animation-iteration-count: var(--fa-animation-iteration-count, infinite); + animation-timing-function: var(--fa-animation-timing, steps(8)); } + +@media (prefers-reduced-motion: reduce) { + .fa-beat, + .fa-bounce, + .fa-fade, + .fa-beat-fade, + .fa-flip, + .fa-pulse, + .fa-shake, + .fa-spin, + .fa-spin-pulse { + animation-delay: -1ms; + animation-duration: 1ms; + animation-iteration-count: 1; + transition-delay: 0s; + transition-duration: 0s; } } + +@keyframes fa-beat { + 0%, 90% { + transform: scale(1); } + 45% { + transform: scale(var(--fa-beat-scale, 1.25)); } } + +@keyframes fa-bounce { + 0% { + transform: scale(1, 1) translateY(0); } + 10% { + transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0); } + 30% { + transform: 
scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em)); } + 50% { + transform: scale(var(--fa-bounce-land-scale-x, 1.05), var(--fa-bounce-land-scale-y, 0.95)) translateY(0); } + 57% { + transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em)); } + 64% { + transform: scale(1, 1) translateY(0); } + 100% { + transform: scale(1, 1) translateY(0); } } + +@keyframes fa-fade { + 50% { + opacity: var(--fa-fade-opacity, 0.4); } } + +@keyframes fa-beat-fade { + 0%, 100% { + opacity: var(--fa-beat-fade-opacity, 0.4); + transform: scale(1); } + 50% { + opacity: 1; + transform: scale(var(--fa-beat-fade-scale, 1.125)); } } + +@keyframes fa-flip { + 50% { + transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg)); } } + +@keyframes fa-shake { + 0% { + transform: rotate(-15deg); } + 4% { + transform: rotate(15deg); } + 8%, 24% { + transform: rotate(-18deg); } + 12%, 28% { + transform: rotate(18deg); } + 16% { + transform: rotate(-22deg); } + 20% { + transform: rotate(22deg); } + 32% { + transform: rotate(-12deg); } + 36% { + transform: rotate(12deg); } + 40%, 100% { + transform: rotate(0deg); } } + +@keyframes fa-spin { + 0% { + transform: rotate(0deg); } + 100% { + transform: rotate(360deg); } } + +.fa-rotate-90 { + transform: rotate(90deg); } + +.fa-rotate-180 { + transform: rotate(180deg); } + +.fa-rotate-270 { + transform: rotate(270deg); } + +.fa-flip-horizontal { + transform: scale(-1, 1); } + +.fa-flip-vertical { + transform: scale(1, -1); } + +.fa-flip-both, +.fa-flip-horizontal.fa-flip-vertical { + transform: scale(-1, -1); } + +.fa-rotate-by { + transform: rotate(var(--fa-rotate-angle, 0)); } + +.fa-stack { + display: inline-block; + height: 2em; + line-height: 2em; + position: relative; + vertical-align: middle; + width: 2.5em; } + +.fa-stack-1x, +.fa-stack-2x { + left: 0; + position: absolute; + text-align: center; + width: 100%; + 
z-index: var(--fa-stack-z-index, auto); } + +.fa-stack-1x { + line-height: inherit; } + +.fa-stack-2x { + font-size: 2em; } + +.fa-inverse { + color: var(--fa-inverse, #fff); } + +/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen +readers do not read off random characters that represent icons */ +.fa-0::before { + content: "\30"; } + +.fa-1::before { + content: "\31"; } + +.fa-2::before { + content: "\32"; } + +.fa-3::before { + content: "\33"; } + +.fa-4::before { + content: "\34"; } + +.fa-5::before { + content: "\35"; } + +.fa-6::before { + content: "\36"; } + +.fa-7::before { + content: "\37"; } + +.fa-8::before { + content: "\38"; } + +.fa-9::before { + content: "\39"; } + +.fa-fill-drip::before { + content: "\f576"; } + +.fa-arrows-to-circle::before { + content: "\e4bd"; } + +.fa-circle-chevron-right::before { + content: "\f138"; } + +.fa-chevron-circle-right::before { + content: "\f138"; } + +.fa-at::before { + content: "\40"; } + +.fa-trash-can::before { + content: "\f2ed"; } + +.fa-trash-alt::before { + content: "\f2ed"; } + +.fa-text-height::before { + content: "\f034"; } + +.fa-user-xmark::before { + content: "\f235"; } + +.fa-user-times::before { + content: "\f235"; } + +.fa-stethoscope::before { + content: "\f0f1"; } + +.fa-message::before { + content: "\f27a"; } + +.fa-comment-alt::before { + content: "\f27a"; } + +.fa-info::before { + content: "\f129"; } + +.fa-down-left-and-up-right-to-center::before { + content: "\f422"; } + +.fa-compress-alt::before { + content: "\f422"; } + +.fa-explosion::before { + content: "\e4e9"; } + +.fa-file-lines::before { + content: "\f15c"; } + +.fa-file-alt::before { + content: "\f15c"; } + +.fa-file-text::before { + content: "\f15c"; } + +.fa-wave-square::before { + content: "\f83e"; } + +.fa-ring::before { + content: "\f70b"; } + +.fa-building-un::before { + content: "\e4d9"; } + +.fa-dice-three::before { + content: "\f527"; } + +.fa-calendar-days::before { + content: "\f073"; } + 
+.fa-calendar-alt::before { + content: "\f073"; } + +.fa-anchor-circle-check::before { + content: "\e4aa"; } + +.fa-building-circle-arrow-right::before { + content: "\e4d1"; } + +.fa-volleyball::before { + content: "\f45f"; } + +.fa-volleyball-ball::before { + content: "\f45f"; } + +.fa-arrows-up-to-line::before { + content: "\e4c2"; } + +.fa-sort-down::before { + content: "\f0dd"; } + +.fa-sort-desc::before { + content: "\f0dd"; } + +.fa-circle-minus::before { + content: "\f056"; } + +.fa-minus-circle::before { + content: "\f056"; } + +.fa-door-open::before { + content: "\f52b"; } + +.fa-right-from-bracket::before { + content: "\f2f5"; } + +.fa-sign-out-alt::before { + content: "\f2f5"; } + +.fa-atom::before { + content: "\f5d2"; } + +.fa-soap::before { + content: "\e06e"; } + +.fa-icons::before { + content: "\f86d"; } + +.fa-heart-music-camera-bolt::before { + content: "\f86d"; } + +.fa-microphone-lines-slash::before { + content: "\f539"; } + +.fa-microphone-alt-slash::before { + content: "\f539"; } + +.fa-bridge-circle-check::before { + content: "\e4c9"; } + +.fa-pump-medical::before { + content: "\e06a"; } + +.fa-fingerprint::before { + content: "\f577"; } + +.fa-hand-point-right::before { + content: "\f0a4"; } + +.fa-magnifying-glass-location::before { + content: "\f689"; } + +.fa-search-location::before { + content: "\f689"; } + +.fa-forward-step::before { + content: "\f051"; } + +.fa-step-forward::before { + content: "\f051"; } + +.fa-face-smile-beam::before { + content: "\f5b8"; } + +.fa-smile-beam::before { + content: "\f5b8"; } + +.fa-flag-checkered::before { + content: "\f11e"; } + +.fa-football::before { + content: "\f44e"; } + +.fa-football-ball::before { + content: "\f44e"; } + +.fa-school-circle-exclamation::before { + content: "\e56c"; } + +.fa-crop::before { + content: "\f125"; } + +.fa-angles-down::before { + content: "\f103"; } + +.fa-angle-double-down::before { + content: "\f103"; } + +.fa-users-rectangle::before { + content: "\e594"; } + 
+.fa-people-roof::before { + content: "\e537"; } + +.fa-people-line::before { + content: "\e534"; } + +.fa-beer-mug-empty::before { + content: "\f0fc"; } + +.fa-beer::before { + content: "\f0fc"; } + +.fa-diagram-predecessor::before { + content: "\e477"; } + +.fa-arrow-up-long::before { + content: "\f176"; } + +.fa-long-arrow-up::before { + content: "\f176"; } + +.fa-fire-flame-simple::before { + content: "\f46a"; } + +.fa-burn::before { + content: "\f46a"; } + +.fa-person::before { + content: "\f183"; } + +.fa-male::before { + content: "\f183"; } + +.fa-laptop::before { + content: "\f109"; } + +.fa-file-csv::before { + content: "\f6dd"; } + +.fa-menorah::before { + content: "\f676"; } + +.fa-truck-plane::before { + content: "\e58f"; } + +.fa-record-vinyl::before { + content: "\f8d9"; } + +.fa-face-grin-stars::before { + content: "\f587"; } + +.fa-grin-stars::before { + content: "\f587"; } + +.fa-bong::before { + content: "\f55c"; } + +.fa-spaghetti-monster-flying::before { + content: "\f67b"; } + +.fa-pastafarianism::before { + content: "\f67b"; } + +.fa-arrow-down-up-across-line::before { + content: "\e4af"; } + +.fa-spoon::before { + content: "\f2e5"; } + +.fa-utensil-spoon::before { + content: "\f2e5"; } + +.fa-jar-wheat::before { + content: "\e517"; } + +.fa-envelopes-bulk::before { + content: "\f674"; } + +.fa-mail-bulk::before { + content: "\f674"; } + +.fa-file-circle-exclamation::before { + content: "\e4eb"; } + +.fa-circle-h::before { + content: "\f47e"; } + +.fa-hospital-symbol::before { + content: "\f47e"; } + +.fa-pager::before { + content: "\f815"; } + +.fa-address-book::before { + content: "\f2b9"; } + +.fa-contact-book::before { + content: "\f2b9"; } + +.fa-strikethrough::before { + content: "\f0cc"; } + +.fa-k::before { + content: "\4b"; } + +.fa-landmark-flag::before { + content: "\e51c"; } + +.fa-pencil::before { + content: "\f303"; } + +.fa-pencil-alt::before { + content: "\f303"; } + +.fa-backward::before { + content: "\f04a"; } + 
+.fa-caret-right::before { + content: "\f0da"; } + +.fa-comments::before { + content: "\f086"; } + +.fa-paste::before { + content: "\f0ea"; } + +.fa-file-clipboard::before { + content: "\f0ea"; } + +.fa-code-pull-request::before { + content: "\e13c"; } + +.fa-clipboard-list::before { + content: "\f46d"; } + +.fa-truck-ramp-box::before { + content: "\f4de"; } + +.fa-truck-loading::before { + content: "\f4de"; } + +.fa-user-check::before { + content: "\f4fc"; } + +.fa-vial-virus::before { + content: "\e597"; } + +.fa-sheet-plastic::before { + content: "\e571"; } + +.fa-blog::before { + content: "\f781"; } + +.fa-user-ninja::before { + content: "\f504"; } + +.fa-person-arrow-up-from-line::before { + content: "\e539"; } + +.fa-scroll-torah::before { + content: "\f6a0"; } + +.fa-torah::before { + content: "\f6a0"; } + +.fa-broom-ball::before { + content: "\f458"; } + +.fa-quidditch::before { + content: "\f458"; } + +.fa-quidditch-broom-ball::before { + content: "\f458"; } + +.fa-toggle-off::before { + content: "\f204"; } + +.fa-box-archive::before { + content: "\f187"; } + +.fa-archive::before { + content: "\f187"; } + +.fa-person-drowning::before { + content: "\e545"; } + +.fa-arrow-down-9-1::before { + content: "\f886"; } + +.fa-sort-numeric-desc::before { + content: "\f886"; } + +.fa-sort-numeric-down-alt::before { + content: "\f886"; } + +.fa-face-grin-tongue-squint::before { + content: "\f58a"; } + +.fa-grin-tongue-squint::before { + content: "\f58a"; } + +.fa-spray-can::before { + content: "\f5bd"; } + +.fa-truck-monster::before { + content: "\f63b"; } + +.fa-w::before { + content: "\57"; } + +.fa-earth-africa::before { + content: "\f57c"; } + +.fa-globe-africa::before { + content: "\f57c"; } + +.fa-rainbow::before { + content: "\f75b"; } + +.fa-circle-notch::before { + content: "\f1ce"; } + +.fa-tablet-screen-button::before { + content: "\f3fa"; } + +.fa-tablet-alt::before { + content: "\f3fa"; } + +.fa-paw::before { + content: "\f1b0"; } + +.fa-cloud::before { + 
content: "\f0c2"; } + +.fa-trowel-bricks::before { + content: "\e58a"; } + +.fa-face-flushed::before { + content: "\f579"; } + +.fa-flushed::before { + content: "\f579"; } + +.fa-hospital-user::before { + content: "\f80d"; } + +.fa-tent-arrow-left-right::before { + content: "\e57f"; } + +.fa-gavel::before { + content: "\f0e3"; } + +.fa-legal::before { + content: "\f0e3"; } + +.fa-binoculars::before { + content: "\f1e5"; } + +.fa-microphone-slash::before { + content: "\f131"; } + +.fa-box-tissue::before { + content: "\e05b"; } + +.fa-motorcycle::before { + content: "\f21c"; } + +.fa-bell-concierge::before { + content: "\f562"; } + +.fa-concierge-bell::before { + content: "\f562"; } + +.fa-pen-ruler::before { + content: "\f5ae"; } + +.fa-pencil-ruler::before { + content: "\f5ae"; } + +.fa-people-arrows::before { + content: "\e068"; } + +.fa-people-arrows-left-right::before { + content: "\e068"; } + +.fa-mars-and-venus-burst::before { + content: "\e523"; } + +.fa-square-caret-right::before { + content: "\f152"; } + +.fa-caret-square-right::before { + content: "\f152"; } + +.fa-scissors::before { + content: "\f0c4"; } + +.fa-cut::before { + content: "\f0c4"; } + +.fa-sun-plant-wilt::before { + content: "\e57a"; } + +.fa-toilets-portable::before { + content: "\e584"; } + +.fa-hockey-puck::before { + content: "\f453"; } + +.fa-table::before { + content: "\f0ce"; } + +.fa-magnifying-glass-arrow-right::before { + content: "\e521"; } + +.fa-tachograph-digital::before { + content: "\f566"; } + +.fa-digital-tachograph::before { + content: "\f566"; } + +.fa-users-slash::before { + content: "\e073"; } + +.fa-clover::before { + content: "\e139"; } + +.fa-reply::before { + content: "\f3e5"; } + +.fa-mail-reply::before { + content: "\f3e5"; } + +.fa-star-and-crescent::before { + content: "\f699"; } + +.fa-house-fire::before { + content: "\e50c"; } + +.fa-square-minus::before { + content: "\f146"; } + +.fa-minus-square::before { + content: "\f146"; } + +.fa-helicopter::before { + 
content: "\f533"; } + +.fa-compass::before { + content: "\f14e"; } + +.fa-square-caret-down::before { + content: "\f150"; } + +.fa-caret-square-down::before { + content: "\f150"; } + +.fa-file-circle-question::before { + content: "\e4ef"; } + +.fa-laptop-code::before { + content: "\f5fc"; } + +.fa-swatchbook::before { + content: "\f5c3"; } + +.fa-prescription-bottle::before { + content: "\f485"; } + +.fa-bars::before { + content: "\f0c9"; } + +.fa-navicon::before { + content: "\f0c9"; } + +.fa-people-group::before { + content: "\e533"; } + +.fa-hourglass-end::before { + content: "\f253"; } + +.fa-hourglass-3::before { + content: "\f253"; } + +.fa-heart-crack::before { + content: "\f7a9"; } + +.fa-heart-broken::before { + content: "\f7a9"; } + +.fa-square-up-right::before { + content: "\f360"; } + +.fa-external-link-square-alt::before { + content: "\f360"; } + +.fa-face-kiss-beam::before { + content: "\f597"; } + +.fa-kiss-beam::before { + content: "\f597"; } + +.fa-film::before { + content: "\f008"; } + +.fa-ruler-horizontal::before { + content: "\f547"; } + +.fa-people-robbery::before { + content: "\e536"; } + +.fa-lightbulb::before { + content: "\f0eb"; } + +.fa-caret-left::before { + content: "\f0d9"; } + +.fa-circle-exclamation::before { + content: "\f06a"; } + +.fa-exclamation-circle::before { + content: "\f06a"; } + +.fa-school-circle-xmark::before { + content: "\e56d"; } + +.fa-arrow-right-from-bracket::before { + content: "\f08b"; } + +.fa-sign-out::before { + content: "\f08b"; } + +.fa-circle-chevron-down::before { + content: "\f13a"; } + +.fa-chevron-circle-down::before { + content: "\f13a"; } + +.fa-unlock-keyhole::before { + content: "\f13e"; } + +.fa-unlock-alt::before { + content: "\f13e"; } + +.fa-cloud-showers-heavy::before { + content: "\f740"; } + +.fa-headphones-simple::before { + content: "\f58f"; } + +.fa-headphones-alt::before { + content: "\f58f"; } + +.fa-sitemap::before { + content: "\f0e8"; } + +.fa-circle-dollar-to-slot::before { + 
content: "\f4b9"; } + +.fa-donate::before { + content: "\f4b9"; } + +.fa-memory::before { + content: "\f538"; } + +.fa-road-spikes::before { + content: "\e568"; } + +.fa-fire-burner::before { + content: "\e4f1"; } + +.fa-flag::before { + content: "\f024"; } + +.fa-hanukiah::before { + content: "\f6e6"; } + +.fa-feather::before { + content: "\f52d"; } + +.fa-volume-low::before { + content: "\f027"; } + +.fa-volume-down::before { + content: "\f027"; } + +.fa-comment-slash::before { + content: "\f4b3"; } + +.fa-cloud-sun-rain::before { + content: "\f743"; } + +.fa-compress::before { + content: "\f066"; } + +.fa-wheat-awn::before { + content: "\e2cd"; } + +.fa-wheat-alt::before { + content: "\e2cd"; } + +.fa-ankh::before { + content: "\f644"; } + +.fa-hands-holding-child::before { + content: "\e4fa"; } + +.fa-asterisk::before { + content: "\2a"; } + +.fa-square-check::before { + content: "\f14a"; } + +.fa-check-square::before { + content: "\f14a"; } + +.fa-peseta-sign::before { + content: "\e221"; } + +.fa-heading::before { + content: "\f1dc"; } + +.fa-header::before { + content: "\f1dc"; } + +.fa-ghost::before { + content: "\f6e2"; } + +.fa-list::before { + content: "\f03a"; } + +.fa-list-squares::before { + content: "\f03a"; } + +.fa-square-phone-flip::before { + content: "\f87b"; } + +.fa-phone-square-alt::before { + content: "\f87b"; } + +.fa-cart-plus::before { + content: "\f217"; } + +.fa-gamepad::before { + content: "\f11b"; } + +.fa-circle-dot::before { + content: "\f192"; } + +.fa-dot-circle::before { + content: "\f192"; } + +.fa-face-dizzy::before { + content: "\f567"; } + +.fa-dizzy::before { + content: "\f567"; } + +.fa-egg::before { + content: "\f7fb"; } + +.fa-house-medical-circle-xmark::before { + content: "\e513"; } + +.fa-campground::before { + content: "\f6bb"; } + +.fa-folder-plus::before { + content: "\f65e"; } + +.fa-futbol::before { + content: "\f1e3"; } + +.fa-futbol-ball::before { + content: "\f1e3"; } + +.fa-soccer-ball::before { + content: 
"\f1e3"; } + +.fa-paintbrush::before { + content: "\f1fc"; } + +.fa-paint-brush::before { + content: "\f1fc"; } + +.fa-lock::before { + content: "\f023"; } + +.fa-gas-pump::before { + content: "\f52f"; } + +.fa-hot-tub-person::before { + content: "\f593"; } + +.fa-hot-tub::before { + content: "\f593"; } + +.fa-map-location::before { + content: "\f59f"; } + +.fa-map-marked::before { + content: "\f59f"; } + +.fa-house-flood-water::before { + content: "\e50e"; } + +.fa-tree::before { + content: "\f1bb"; } + +.fa-bridge-lock::before { + content: "\e4cc"; } + +.fa-sack-dollar::before { + content: "\f81d"; } + +.fa-pen-to-square::before { + content: "\f044"; } + +.fa-edit::before { + content: "\f044"; } + +.fa-car-side::before { + content: "\f5e4"; } + +.fa-share-nodes::before { + content: "\f1e0"; } + +.fa-share-alt::before { + content: "\f1e0"; } + +.fa-heart-circle-minus::before { + content: "\e4ff"; } + +.fa-hourglass-half::before { + content: "\f252"; } + +.fa-hourglass-2::before { + content: "\f252"; } + +.fa-microscope::before { + content: "\f610"; } + +.fa-sink::before { + content: "\e06d"; } + +.fa-bag-shopping::before { + content: "\f290"; } + +.fa-shopping-bag::before { + content: "\f290"; } + +.fa-arrow-down-z-a::before { + content: "\f881"; } + +.fa-sort-alpha-desc::before { + content: "\f881"; } + +.fa-sort-alpha-down-alt::before { + content: "\f881"; } + +.fa-mitten::before { + content: "\f7b5"; } + +.fa-person-rays::before { + content: "\e54d"; } + +.fa-users::before { + content: "\f0c0"; } + +.fa-eye-slash::before { + content: "\f070"; } + +.fa-flask-vial::before { + content: "\e4f3"; } + +.fa-hand::before { + content: "\f256"; } + +.fa-hand-paper::before { + content: "\f256"; } + +.fa-om::before { + content: "\f679"; } + +.fa-worm::before { + content: "\e599"; } + +.fa-house-circle-xmark::before { + content: "\e50b"; } + +.fa-plug::before { + content: "\f1e6"; } + +.fa-chevron-up::before { + content: "\f077"; } + +.fa-hand-spock::before { + content: 
"\f259"; } + +.fa-stopwatch::before { + content: "\f2f2"; } + +.fa-face-kiss::before { + content: "\f596"; } + +.fa-kiss::before { + content: "\f596"; } + +.fa-bridge-circle-xmark::before { + content: "\e4cb"; } + +.fa-face-grin-tongue::before { + content: "\f589"; } + +.fa-grin-tongue::before { + content: "\f589"; } + +.fa-chess-bishop::before { + content: "\f43a"; } + +.fa-face-grin-wink::before { + content: "\f58c"; } + +.fa-grin-wink::before { + content: "\f58c"; } + +.fa-ear-deaf::before { + content: "\f2a4"; } + +.fa-deaf::before { + content: "\f2a4"; } + +.fa-deafness::before { + content: "\f2a4"; } + +.fa-hard-of-hearing::before { + content: "\f2a4"; } + +.fa-road-circle-check::before { + content: "\e564"; } + +.fa-dice-five::before { + content: "\f523"; } + +.fa-square-rss::before { + content: "\f143"; } + +.fa-rss-square::before { + content: "\f143"; } + +.fa-land-mine-on::before { + content: "\e51b"; } + +.fa-i-cursor::before { + content: "\f246"; } + +.fa-stamp::before { + content: "\f5bf"; } + +.fa-stairs::before { + content: "\e289"; } + +.fa-i::before { + content: "\49"; } + +.fa-hryvnia-sign::before { + content: "\f6f2"; } + +.fa-hryvnia::before { + content: "\f6f2"; } + +.fa-pills::before { + content: "\f484"; } + +.fa-face-grin-wide::before { + content: "\f581"; } + +.fa-grin-alt::before { + content: "\f581"; } + +.fa-tooth::before { + content: "\f5c9"; } + +.fa-v::before { + content: "\56"; } + +.fa-bangladeshi-taka-sign::before { + content: "\e2e6"; } + +.fa-bicycle::before { + content: "\f206"; } + +.fa-staff-snake::before { + content: "\e579"; } + +.fa-rod-asclepius::before { + content: "\e579"; } + +.fa-rod-snake::before { + content: "\e579"; } + +.fa-staff-aesculapius::before { + content: "\e579"; } + +.fa-head-side-cough-slash::before { + content: "\e062"; } + +.fa-truck-medical::before { + content: "\f0f9"; } + +.fa-ambulance::before { + content: "\f0f9"; } + +.fa-wheat-awn-circle-exclamation::before { + content: "\e598"; } + 
+.fa-snowman::before { + content: "\f7d0"; } + +.fa-mortar-pestle::before { + content: "\f5a7"; } + +.fa-road-barrier::before { + content: "\e562"; } + +.fa-school::before { + content: "\f549"; } + +.fa-igloo::before { + content: "\f7ae"; } + +.fa-joint::before { + content: "\f595"; } + +.fa-angle-right::before { + content: "\f105"; } + +.fa-horse::before { + content: "\f6f0"; } + +.fa-q::before { + content: "\51"; } + +.fa-g::before { + content: "\47"; } + +.fa-notes-medical::before { + content: "\f481"; } + +.fa-temperature-half::before { + content: "\f2c9"; } + +.fa-temperature-2::before { + content: "\f2c9"; } + +.fa-thermometer-2::before { + content: "\f2c9"; } + +.fa-thermometer-half::before { + content: "\f2c9"; } + +.fa-dong-sign::before { + content: "\e169"; } + +.fa-capsules::before { + content: "\f46b"; } + +.fa-poo-storm::before { + content: "\f75a"; } + +.fa-poo-bolt::before { + content: "\f75a"; } + +.fa-face-frown-open::before { + content: "\f57a"; } + +.fa-frown-open::before { + content: "\f57a"; } + +.fa-hand-point-up::before { + content: "\f0a6"; } + +.fa-money-bill::before { + content: "\f0d6"; } + +.fa-bookmark::before { + content: "\f02e"; } + +.fa-align-justify::before { + content: "\f039"; } + +.fa-umbrella-beach::before { + content: "\f5ca"; } + +.fa-helmet-un::before { + content: "\e503"; } + +.fa-bullseye::before { + content: "\f140"; } + +.fa-bacon::before { + content: "\f7e5"; } + +.fa-hand-point-down::before { + content: "\f0a7"; } + +.fa-arrow-up-from-bracket::before { + content: "\e09a"; } + +.fa-folder::before { + content: "\f07b"; } + +.fa-folder-blank::before { + content: "\f07b"; } + +.fa-file-waveform::before { + content: "\f478"; } + +.fa-file-medical-alt::before { + content: "\f478"; } + +.fa-radiation::before { + content: "\f7b9"; } + +.fa-chart-simple::before { + content: "\e473"; } + +.fa-mars-stroke::before { + content: "\f229"; } + +.fa-vial::before { + content: "\f492"; } + +.fa-gauge::before { + content: "\f624"; } + 
+.fa-dashboard::before { + content: "\f624"; } + +.fa-gauge-med::before { + content: "\f624"; } + +.fa-tachometer-alt-average::before { + content: "\f624"; } + +.fa-wand-magic-sparkles::before { + content: "\e2ca"; } + +.fa-magic-wand-sparkles::before { + content: "\e2ca"; } + +.fa-e::before { + content: "\45"; } + +.fa-pen-clip::before { + content: "\f305"; } + +.fa-pen-alt::before { + content: "\f305"; } + +.fa-bridge-circle-exclamation::before { + content: "\e4ca"; } + +.fa-user::before { + content: "\f007"; } + +.fa-school-circle-check::before { + content: "\e56b"; } + +.fa-dumpster::before { + content: "\f793"; } + +.fa-van-shuttle::before { + content: "\f5b6"; } + +.fa-shuttle-van::before { + content: "\f5b6"; } + +.fa-building-user::before { + content: "\e4da"; } + +.fa-square-caret-left::before { + content: "\f191"; } + +.fa-caret-square-left::before { + content: "\f191"; } + +.fa-highlighter::before { + content: "\f591"; } + +.fa-key::before { + content: "\f084"; } + +.fa-bullhorn::before { + content: "\f0a1"; } + +.fa-globe::before { + content: "\f0ac"; } + +.fa-synagogue::before { + content: "\f69b"; } + +.fa-person-half-dress::before { + content: "\e548"; } + +.fa-road-bridge::before { + content: "\e563"; } + +.fa-location-arrow::before { + content: "\f124"; } + +.fa-c::before { + content: "\43"; } + +.fa-tablet-button::before { + content: "\f10a"; } + +.fa-building-lock::before { + content: "\e4d6"; } + +.fa-pizza-slice::before { + content: "\f818"; } + +.fa-money-bill-wave::before { + content: "\f53a"; } + +.fa-chart-area::before { + content: "\f1fe"; } + +.fa-area-chart::before { + content: "\f1fe"; } + +.fa-house-flag::before { + content: "\e50d"; } + +.fa-person-circle-minus::before { + content: "\e540"; } + +.fa-ban::before { + content: "\f05e"; } + +.fa-cancel::before { + content: "\f05e"; } + +.fa-camera-rotate::before { + content: "\e0d8"; } + +.fa-spray-can-sparkles::before { + content: "\f5d0"; } + +.fa-air-freshener::before { + content: 
"\f5d0"; } + +.fa-star::before { + content: "\f005"; } + +.fa-repeat::before { + content: "\f363"; } + +.fa-cross::before { + content: "\f654"; } + +.fa-box::before { + content: "\f466"; } + +.fa-venus-mars::before { + content: "\f228"; } + +.fa-arrow-pointer::before { + content: "\f245"; } + +.fa-mouse-pointer::before { + content: "\f245"; } + +.fa-maximize::before { + content: "\f31e"; } + +.fa-expand-arrows-alt::before { + content: "\f31e"; } + +.fa-charging-station::before { + content: "\f5e7"; } + +.fa-shapes::before { + content: "\f61f"; } + +.fa-triangle-circle-square::before { + content: "\f61f"; } + +.fa-shuffle::before { + content: "\f074"; } + +.fa-random::before { + content: "\f074"; } + +.fa-person-running::before { + content: "\f70c"; } + +.fa-running::before { + content: "\f70c"; } + +.fa-mobile-retro::before { + content: "\e527"; } + +.fa-grip-lines-vertical::before { + content: "\f7a5"; } + +.fa-spider::before { + content: "\f717"; } + +.fa-hands-bound::before { + content: "\e4f9"; } + +.fa-file-invoice-dollar::before { + content: "\f571"; } + +.fa-plane-circle-exclamation::before { + content: "\e556"; } + +.fa-x-ray::before { + content: "\f497"; } + +.fa-spell-check::before { + content: "\f891"; } + +.fa-slash::before { + content: "\f715"; } + +.fa-computer-mouse::before { + content: "\f8cc"; } + +.fa-mouse::before { + content: "\f8cc"; } + +.fa-arrow-right-to-bracket::before { + content: "\f090"; } + +.fa-sign-in::before { + content: "\f090"; } + +.fa-shop-slash::before { + content: "\e070"; } + +.fa-store-alt-slash::before { + content: "\e070"; } + +.fa-server::before { + content: "\f233"; } + +.fa-virus-covid-slash::before { + content: "\e4a9"; } + +.fa-shop-lock::before { + content: "\e4a5"; } + +.fa-hourglass-start::before { + content: "\f251"; } + +.fa-hourglass-1::before { + content: "\f251"; } + +.fa-blender-phone::before { + content: "\f6b6"; } + +.fa-building-wheat::before { + content: "\e4db"; } + +.fa-person-breastfeeding::before { + 
content: "\e53a"; } + +.fa-right-to-bracket::before { + content: "\f2f6"; } + +.fa-sign-in-alt::before { + content: "\f2f6"; } + +.fa-venus::before { + content: "\f221"; } + +.fa-passport::before { + content: "\f5ab"; } + +.fa-heart-pulse::before { + content: "\f21e"; } + +.fa-heartbeat::before { + content: "\f21e"; } + +.fa-people-carry-box::before { + content: "\f4ce"; } + +.fa-people-carry::before { + content: "\f4ce"; } + +.fa-temperature-high::before { + content: "\f769"; } + +.fa-microchip::before { + content: "\f2db"; } + +.fa-crown::before { + content: "\f521"; } + +.fa-weight-hanging::before { + content: "\f5cd"; } + +.fa-xmarks-lines::before { + content: "\e59a"; } + +.fa-file-prescription::before { + content: "\f572"; } + +.fa-weight-scale::before { + content: "\f496"; } + +.fa-weight::before { + content: "\f496"; } + +.fa-user-group::before { + content: "\f500"; } + +.fa-user-friends::before { + content: "\f500"; } + +.fa-arrow-up-a-z::before { + content: "\f15e"; } + +.fa-sort-alpha-up::before { + content: "\f15e"; } + +.fa-chess-knight::before { + content: "\f441"; } + +.fa-face-laugh-squint::before { + content: "\f59b"; } + +.fa-laugh-squint::before { + content: "\f59b"; } + +.fa-wheelchair::before { + content: "\f193"; } + +.fa-circle-arrow-up::before { + content: "\f0aa"; } + +.fa-arrow-circle-up::before { + content: "\f0aa"; } + +.fa-toggle-on::before { + content: "\f205"; } + +.fa-person-walking::before { + content: "\f554"; } + +.fa-walking::before { + content: "\f554"; } + +.fa-l::before { + content: "\4c"; } + +.fa-fire::before { + content: "\f06d"; } + +.fa-bed-pulse::before { + content: "\f487"; } + +.fa-procedures::before { + content: "\f487"; } + +.fa-shuttle-space::before { + content: "\f197"; } + +.fa-space-shuttle::before { + content: "\f197"; } + +.fa-face-laugh::before { + content: "\f599"; } + +.fa-laugh::before { + content: "\f599"; } + +.fa-folder-open::before { + content: "\f07c"; } + +.fa-heart-circle-plus::before { + content: 
"\e500"; } + +.fa-code-fork::before { + content: "\e13b"; } + +.fa-city::before { + content: "\f64f"; } + +.fa-microphone-lines::before { + content: "\f3c9"; } + +.fa-microphone-alt::before { + content: "\f3c9"; } + +.fa-pepper-hot::before { + content: "\f816"; } + +.fa-unlock::before { + content: "\f09c"; } + +.fa-colon-sign::before { + content: "\e140"; } + +.fa-headset::before { + content: "\f590"; } + +.fa-store-slash::before { + content: "\e071"; } + +.fa-road-circle-xmark::before { + content: "\e566"; } + +.fa-user-minus::before { + content: "\f503"; } + +.fa-mars-stroke-up::before { + content: "\f22a"; } + +.fa-mars-stroke-v::before { + content: "\f22a"; } + +.fa-champagne-glasses::before { + content: "\f79f"; } + +.fa-glass-cheers::before { + content: "\f79f"; } + +.fa-clipboard::before { + content: "\f328"; } + +.fa-house-circle-exclamation::before { + content: "\e50a"; } + +.fa-file-arrow-up::before { + content: "\f574"; } + +.fa-file-upload::before { + content: "\f574"; } + +.fa-wifi::before { + content: "\f1eb"; } + +.fa-wifi-3::before { + content: "\f1eb"; } + +.fa-wifi-strong::before { + content: "\f1eb"; } + +.fa-bath::before { + content: "\f2cd"; } + +.fa-bathtub::before { + content: "\f2cd"; } + +.fa-underline::before { + content: "\f0cd"; } + +.fa-user-pen::before { + content: "\f4ff"; } + +.fa-user-edit::before { + content: "\f4ff"; } + +.fa-signature::before { + content: "\f5b7"; } + +.fa-stroopwafel::before { + content: "\f551"; } + +.fa-bold::before { + content: "\f032"; } + +.fa-anchor-lock::before { + content: "\e4ad"; } + +.fa-building-ngo::before { + content: "\e4d7"; } + +.fa-manat-sign::before { + content: "\e1d5"; } + +.fa-not-equal::before { + content: "\f53e"; } + +.fa-border-top-left::before { + content: "\f853"; } + +.fa-border-style::before { + content: "\f853"; } + +.fa-map-location-dot::before { + content: "\f5a0"; } + +.fa-map-marked-alt::before { + content: "\f5a0"; } + +.fa-jedi::before { + content: "\f669"; } + 
+.fa-square-poll-vertical::before { + content: "\f681"; } + +.fa-poll::before { + content: "\f681"; } + +.fa-mug-hot::before { + content: "\f7b6"; } + +.fa-car-battery::before { + content: "\f5df"; } + +.fa-battery-car::before { + content: "\f5df"; } + +.fa-gift::before { + content: "\f06b"; } + +.fa-dice-two::before { + content: "\f528"; } + +.fa-chess-queen::before { + content: "\f445"; } + +.fa-glasses::before { + content: "\f530"; } + +.fa-chess-board::before { + content: "\f43c"; } + +.fa-building-circle-check::before { + content: "\e4d2"; } + +.fa-person-chalkboard::before { + content: "\e53d"; } + +.fa-mars-stroke-right::before { + content: "\f22b"; } + +.fa-mars-stroke-h::before { + content: "\f22b"; } + +.fa-hand-back-fist::before { + content: "\f255"; } + +.fa-hand-rock::before { + content: "\f255"; } + +.fa-square-caret-up::before { + content: "\f151"; } + +.fa-caret-square-up::before { + content: "\f151"; } + +.fa-cloud-showers-water::before { + content: "\e4e4"; } + +.fa-chart-bar::before { + content: "\f080"; } + +.fa-bar-chart::before { + content: "\f080"; } + +.fa-hands-bubbles::before { + content: "\e05e"; } + +.fa-hands-wash::before { + content: "\e05e"; } + +.fa-less-than-equal::before { + content: "\f537"; } + +.fa-train::before { + content: "\f238"; } + +.fa-eye-low-vision::before { + content: "\f2a8"; } + +.fa-low-vision::before { + content: "\f2a8"; } + +.fa-crow::before { + content: "\f520"; } + +.fa-sailboat::before { + content: "\e445"; } + +.fa-window-restore::before { + content: "\f2d2"; } + +.fa-square-plus::before { + content: "\f0fe"; } + +.fa-plus-square::before { + content: "\f0fe"; } + +.fa-torii-gate::before { + content: "\f6a1"; } + +.fa-frog::before { + content: "\f52e"; } + +.fa-bucket::before { + content: "\e4cf"; } + +.fa-image::before { + content: "\f03e"; } + +.fa-microphone::before { + content: "\f130"; } + +.fa-cow::before { + content: "\f6c8"; } + +.fa-caret-up::before { + content: "\f0d8"; } + +.fa-screwdriver::before { 
+ content: "\f54a"; } + +.fa-folder-closed::before { + content: "\e185"; } + +.fa-house-tsunami::before { + content: "\e515"; } + +.fa-square-nfi::before { + content: "\e576"; } + +.fa-arrow-up-from-ground-water::before { + content: "\e4b5"; } + +.fa-martini-glass::before { + content: "\f57b"; } + +.fa-glass-martini-alt::before { + content: "\f57b"; } + +.fa-rotate-left::before { + content: "\f2ea"; } + +.fa-rotate-back::before { + content: "\f2ea"; } + +.fa-rotate-backward::before { + content: "\f2ea"; } + +.fa-undo-alt::before { + content: "\f2ea"; } + +.fa-table-columns::before { + content: "\f0db"; } + +.fa-columns::before { + content: "\f0db"; } + +.fa-lemon::before { + content: "\f094"; } + +.fa-head-side-mask::before { + content: "\e063"; } + +.fa-handshake::before { + content: "\f2b5"; } + +.fa-gem::before { + content: "\f3a5"; } + +.fa-dolly::before { + content: "\f472"; } + +.fa-dolly-box::before { + content: "\f472"; } + +.fa-smoking::before { + content: "\f48d"; } + +.fa-minimize::before { + content: "\f78c"; } + +.fa-compress-arrows-alt::before { + content: "\f78c"; } + +.fa-monument::before { + content: "\f5a6"; } + +.fa-snowplow::before { + content: "\f7d2"; } + +.fa-angles-right::before { + content: "\f101"; } + +.fa-angle-double-right::before { + content: "\f101"; } + +.fa-cannabis::before { + content: "\f55f"; } + +.fa-circle-play::before { + content: "\f144"; } + +.fa-play-circle::before { + content: "\f144"; } + +.fa-tablets::before { + content: "\f490"; } + +.fa-ethernet::before { + content: "\f796"; } + +.fa-euro-sign::before { + content: "\f153"; } + +.fa-eur::before { + content: "\f153"; } + +.fa-euro::before { + content: "\f153"; } + +.fa-chair::before { + content: "\f6c0"; } + +.fa-circle-check::before { + content: "\f058"; } + +.fa-check-circle::before { + content: "\f058"; } + +.fa-circle-stop::before { + content: "\f28d"; } + +.fa-stop-circle::before { + content: "\f28d"; } + +.fa-compass-drafting::before { + content: "\f568"; } + 
+.fa-drafting-compass::before { + content: "\f568"; } + +.fa-plate-wheat::before { + content: "\e55a"; } + +.fa-icicles::before { + content: "\f7ad"; } + +.fa-person-shelter::before { + content: "\e54f"; } + +.fa-neuter::before { + content: "\f22c"; } + +.fa-id-badge::before { + content: "\f2c1"; } + +.fa-marker::before { + content: "\f5a1"; } + +.fa-face-laugh-beam::before { + content: "\f59a"; } + +.fa-laugh-beam::before { + content: "\f59a"; } + +.fa-helicopter-symbol::before { + content: "\e502"; } + +.fa-universal-access::before { + content: "\f29a"; } + +.fa-circle-chevron-up::before { + content: "\f139"; } + +.fa-chevron-circle-up::before { + content: "\f139"; } + +.fa-lari-sign::before { + content: "\e1c8"; } + +.fa-volcano::before { + content: "\f770"; } + +.fa-person-walking-dashed-line-arrow-right::before { + content: "\e553"; } + +.fa-sterling-sign::before { + content: "\f154"; } + +.fa-gbp::before { + content: "\f154"; } + +.fa-pound-sign::before { + content: "\f154"; } + +.fa-viruses::before { + content: "\e076"; } + +.fa-square-person-confined::before { + content: "\e577"; } + +.fa-user-tie::before { + content: "\f508"; } + +.fa-arrow-down-long::before { + content: "\f175"; } + +.fa-long-arrow-down::before { + content: "\f175"; } + +.fa-tent-arrow-down-to-line::before { + content: "\e57e"; } + +.fa-certificate::before { + content: "\f0a3"; } + +.fa-reply-all::before { + content: "\f122"; } + +.fa-mail-reply-all::before { + content: "\f122"; } + +.fa-suitcase::before { + content: "\f0f2"; } + +.fa-person-skating::before { + content: "\f7c5"; } + +.fa-skating::before { + content: "\f7c5"; } + +.fa-filter-circle-dollar::before { + content: "\f662"; } + +.fa-funnel-dollar::before { + content: "\f662"; } + +.fa-camera-retro::before { + content: "\f083"; } + +.fa-circle-arrow-down::before { + content: "\f0ab"; } + +.fa-arrow-circle-down::before { + content: "\f0ab"; } + +.fa-file-import::before { + content: "\f56f"; } + +.fa-arrow-right-to-file::before { + 
content: "\f56f"; } + +.fa-square-arrow-up-right::before { + content: "\f14c"; } + +.fa-external-link-square::before { + content: "\f14c"; } + +.fa-box-open::before { + content: "\f49e"; } + +.fa-scroll::before { + content: "\f70e"; } + +.fa-spa::before { + content: "\f5bb"; } + +.fa-location-pin-lock::before { + content: "\e51f"; } + +.fa-pause::before { + content: "\f04c"; } + +.fa-hill-avalanche::before { + content: "\e507"; } + +.fa-temperature-empty::before { + content: "\f2cb"; } + +.fa-temperature-0::before { + content: "\f2cb"; } + +.fa-thermometer-0::before { + content: "\f2cb"; } + +.fa-thermometer-empty::before { + content: "\f2cb"; } + +.fa-bomb::before { + content: "\f1e2"; } + +.fa-registered::before { + content: "\f25d"; } + +.fa-address-card::before { + content: "\f2bb"; } + +.fa-contact-card::before { + content: "\f2bb"; } + +.fa-vcard::before { + content: "\f2bb"; } + +.fa-scale-unbalanced-flip::before { + content: "\f516"; } + +.fa-balance-scale-right::before { + content: "\f516"; } + +.fa-subscript::before { + content: "\f12c"; } + +.fa-diamond-turn-right::before { + content: "\f5eb"; } + +.fa-directions::before { + content: "\f5eb"; } + +.fa-burst::before { + content: "\e4dc"; } + +.fa-house-laptop::before { + content: "\e066"; } + +.fa-laptop-house::before { + content: "\e066"; } + +.fa-face-tired::before { + content: "\f5c8"; } + +.fa-tired::before { + content: "\f5c8"; } + +.fa-money-bills::before { + content: "\e1f3"; } + +.fa-smog::before { + content: "\f75f"; } + +.fa-crutch::before { + content: "\f7f7"; } + +.fa-cloud-arrow-up::before { + content: "\f0ee"; } + +.fa-cloud-upload::before { + content: "\f0ee"; } + +.fa-cloud-upload-alt::before { + content: "\f0ee"; } + +.fa-palette::before { + content: "\f53f"; } + +.fa-arrows-turn-right::before { + content: "\e4c0"; } + +.fa-vest::before { + content: "\e085"; } + +.fa-ferry::before { + content: "\e4ea"; } + +.fa-arrows-down-to-people::before { + content: "\e4b9"; } + +.fa-seedling::before 
{ + content: "\f4d8"; } + +.fa-sprout::before { + content: "\f4d8"; } + +.fa-left-right::before { + content: "\f337"; } + +.fa-arrows-alt-h::before { + content: "\f337"; } + +.fa-boxes-packing::before { + content: "\e4c7"; } + +.fa-circle-arrow-left::before { + content: "\f0a8"; } + +.fa-arrow-circle-left::before { + content: "\f0a8"; } + +.fa-group-arrows-rotate::before { + content: "\e4f6"; } + +.fa-bowl-food::before { + content: "\e4c6"; } + +.fa-candy-cane::before { + content: "\f786"; } + +.fa-arrow-down-wide-short::before { + content: "\f160"; } + +.fa-sort-amount-asc::before { + content: "\f160"; } + +.fa-sort-amount-down::before { + content: "\f160"; } + +.fa-cloud-bolt::before { + content: "\f76c"; } + +.fa-thunderstorm::before { + content: "\f76c"; } + +.fa-text-slash::before { + content: "\f87d"; } + +.fa-remove-format::before { + content: "\f87d"; } + +.fa-face-smile-wink::before { + content: "\f4da"; } + +.fa-smile-wink::before { + content: "\f4da"; } + +.fa-file-word::before { + content: "\f1c2"; } + +.fa-file-powerpoint::before { + content: "\f1c4"; } + +.fa-arrows-left-right::before { + content: "\f07e"; } + +.fa-arrows-h::before { + content: "\f07e"; } + +.fa-house-lock::before { + content: "\e510"; } + +.fa-cloud-arrow-down::before { + content: "\f0ed"; } + +.fa-cloud-download::before { + content: "\f0ed"; } + +.fa-cloud-download-alt::before { + content: "\f0ed"; } + +.fa-children::before { + content: "\e4e1"; } + +.fa-chalkboard::before { + content: "\f51b"; } + +.fa-blackboard::before { + content: "\f51b"; } + +.fa-user-large-slash::before { + content: "\f4fa"; } + +.fa-user-alt-slash::before { + content: "\f4fa"; } + +.fa-envelope-open::before { + content: "\f2b6"; } + +.fa-handshake-simple-slash::before { + content: "\e05f"; } + +.fa-handshake-alt-slash::before { + content: "\e05f"; } + +.fa-mattress-pillow::before { + content: "\e525"; } + +.fa-guarani-sign::before { + content: "\e19a"; } + +.fa-arrows-rotate::before { + content: "\f021"; } + 
+.fa-refresh::before { + content: "\f021"; } + +.fa-sync::before { + content: "\f021"; } + +.fa-fire-extinguisher::before { + content: "\f134"; } + +.fa-cruzeiro-sign::before { + content: "\e152"; } + +.fa-greater-than-equal::before { + content: "\f532"; } + +.fa-shield-halved::before { + content: "\f3ed"; } + +.fa-shield-alt::before { + content: "\f3ed"; } + +.fa-book-atlas::before { + content: "\f558"; } + +.fa-atlas::before { + content: "\f558"; } + +.fa-virus::before { + content: "\e074"; } + +.fa-envelope-circle-check::before { + content: "\e4e8"; } + +.fa-layer-group::before { + content: "\f5fd"; } + +.fa-arrows-to-dot::before { + content: "\e4be"; } + +.fa-archway::before { + content: "\f557"; } + +.fa-heart-circle-check::before { + content: "\e4fd"; } + +.fa-house-chimney-crack::before { + content: "\f6f1"; } + +.fa-house-damage::before { + content: "\f6f1"; } + +.fa-file-zipper::before { + content: "\f1c6"; } + +.fa-file-archive::before { + content: "\f1c6"; } + +.fa-square::before { + content: "\f0c8"; } + +.fa-martini-glass-empty::before { + content: "\f000"; } + +.fa-glass-martini::before { + content: "\f000"; } + +.fa-couch::before { + content: "\f4b8"; } + +.fa-cedi-sign::before { + content: "\e0df"; } + +.fa-italic::before { + content: "\f033"; } + +.fa-table-cells-column-lock::before { + content: "\e678"; } + +.fa-church::before { + content: "\f51d"; } + +.fa-comments-dollar::before { + content: "\f653"; } + +.fa-democrat::before { + content: "\f747"; } + +.fa-z::before { + content: "\5a"; } + +.fa-person-skiing::before { + content: "\f7c9"; } + +.fa-skiing::before { + content: "\f7c9"; } + +.fa-road-lock::before { + content: "\e567"; } + +.fa-a::before { + content: "\41"; } + +.fa-temperature-arrow-down::before { + content: "\e03f"; } + +.fa-temperature-down::before { + content: "\e03f"; } + +.fa-feather-pointed::before { + content: "\f56b"; } + +.fa-feather-alt::before { + content: "\f56b"; } + +.fa-p::before { + content: "\50"; } + 
+.fa-snowflake::before { + content: "\f2dc"; } + +.fa-newspaper::before { + content: "\f1ea"; } + +.fa-rectangle-ad::before { + content: "\f641"; } + +.fa-ad::before { + content: "\f641"; } + +.fa-circle-arrow-right::before { + content: "\f0a9"; } + +.fa-arrow-circle-right::before { + content: "\f0a9"; } + +.fa-filter-circle-xmark::before { + content: "\e17b"; } + +.fa-locust::before { + content: "\e520"; } + +.fa-sort::before { + content: "\f0dc"; } + +.fa-unsorted::before { + content: "\f0dc"; } + +.fa-list-ol::before { + content: "\f0cb"; } + +.fa-list-1-2::before { + content: "\f0cb"; } + +.fa-list-numeric::before { + content: "\f0cb"; } + +.fa-person-dress-burst::before { + content: "\e544"; } + +.fa-money-check-dollar::before { + content: "\f53d"; } + +.fa-money-check-alt::before { + content: "\f53d"; } + +.fa-vector-square::before { + content: "\f5cb"; } + +.fa-bread-slice::before { + content: "\f7ec"; } + +.fa-language::before { + content: "\f1ab"; } + +.fa-face-kiss-wink-heart::before { + content: "\f598"; } + +.fa-kiss-wink-heart::before { + content: "\f598"; } + +.fa-filter::before { + content: "\f0b0"; } + +.fa-question::before { + content: "\3f"; } + +.fa-file-signature::before { + content: "\f573"; } + +.fa-up-down-left-right::before { + content: "\f0b2"; } + +.fa-arrows-alt::before { + content: "\f0b2"; } + +.fa-house-chimney-user::before { + content: "\e065"; } + +.fa-hand-holding-heart::before { + content: "\f4be"; } + +.fa-puzzle-piece::before { + content: "\f12e"; } + +.fa-money-check::before { + content: "\f53c"; } + +.fa-star-half-stroke::before { + content: "\f5c0"; } + +.fa-star-half-alt::before { + content: "\f5c0"; } + +.fa-code::before { + content: "\f121"; } + +.fa-whiskey-glass::before { + content: "\f7a0"; } + +.fa-glass-whiskey::before { + content: "\f7a0"; } + +.fa-building-circle-exclamation::before { + content: "\e4d3"; } + +.fa-magnifying-glass-chart::before { + content: "\e522"; } + +.fa-arrow-up-right-from-square::before { + 
content: "\f08e"; } + +.fa-external-link::before { + content: "\f08e"; } + +.fa-cubes-stacked::before { + content: "\e4e6"; } + +.fa-won-sign::before { + content: "\f159"; } + +.fa-krw::before { + content: "\f159"; } + +.fa-won::before { + content: "\f159"; } + +.fa-virus-covid::before { + content: "\e4a8"; } + +.fa-austral-sign::before { + content: "\e0a9"; } + +.fa-f::before { + content: "\46"; } + +.fa-leaf::before { + content: "\f06c"; } + +.fa-road::before { + content: "\f018"; } + +.fa-taxi::before { + content: "\f1ba"; } + +.fa-cab::before { + content: "\f1ba"; } + +.fa-person-circle-plus::before { + content: "\e541"; } + +.fa-chart-pie::before { + content: "\f200"; } + +.fa-pie-chart::before { + content: "\f200"; } + +.fa-bolt-lightning::before { + content: "\e0b7"; } + +.fa-sack-xmark::before { + content: "\e56a"; } + +.fa-file-excel::before { + content: "\f1c3"; } + +.fa-file-contract::before { + content: "\f56c"; } + +.fa-fish-fins::before { + content: "\e4f2"; } + +.fa-building-flag::before { + content: "\e4d5"; } + +.fa-face-grin-beam::before { + content: "\f582"; } + +.fa-grin-beam::before { + content: "\f582"; } + +.fa-object-ungroup::before { + content: "\f248"; } + +.fa-poop::before { + content: "\f619"; } + +.fa-location-pin::before { + content: "\f041"; } + +.fa-map-marker::before { + content: "\f041"; } + +.fa-kaaba::before { + content: "\f66b"; } + +.fa-toilet-paper::before { + content: "\f71e"; } + +.fa-helmet-safety::before { + content: "\f807"; } + +.fa-hard-hat::before { + content: "\f807"; } + +.fa-hat-hard::before { + content: "\f807"; } + +.fa-eject::before { + content: "\f052"; } + +.fa-circle-right::before { + content: "\f35a"; } + +.fa-arrow-alt-circle-right::before { + content: "\f35a"; } + +.fa-plane-circle-check::before { + content: "\e555"; } + +.fa-face-rolling-eyes::before { + content: "\f5a5"; } + +.fa-meh-rolling-eyes::before { + content: "\f5a5"; } + +.fa-object-group::before { + content: "\f247"; } + +.fa-chart-line::before 
{ + content: "\f201"; } + +.fa-line-chart::before { + content: "\f201"; } + +.fa-mask-ventilator::before { + content: "\e524"; } + +.fa-arrow-right::before { + content: "\f061"; } + +.fa-signs-post::before { + content: "\f277"; } + +.fa-map-signs::before { + content: "\f277"; } + +.fa-cash-register::before { + content: "\f788"; } + +.fa-person-circle-question::before { + content: "\e542"; } + +.fa-h::before { + content: "\48"; } + +.fa-tarp::before { + content: "\e57b"; } + +.fa-screwdriver-wrench::before { + content: "\f7d9"; } + +.fa-tools::before { + content: "\f7d9"; } + +.fa-arrows-to-eye::before { + content: "\e4bf"; } + +.fa-plug-circle-bolt::before { + content: "\e55b"; } + +.fa-heart::before { + content: "\f004"; } + +.fa-mars-and-venus::before { + content: "\f224"; } + +.fa-house-user::before { + content: "\e1b0"; } + +.fa-home-user::before { + content: "\e1b0"; } + +.fa-dumpster-fire::before { + content: "\f794"; } + +.fa-house-crack::before { + content: "\e3b1"; } + +.fa-martini-glass-citrus::before { + content: "\f561"; } + +.fa-cocktail::before { + content: "\f561"; } + +.fa-face-surprise::before { + content: "\f5c2"; } + +.fa-surprise::before { + content: "\f5c2"; } + +.fa-bottle-water::before { + content: "\e4c5"; } + +.fa-circle-pause::before { + content: "\f28b"; } + +.fa-pause-circle::before { + content: "\f28b"; } + +.fa-toilet-paper-slash::before { + content: "\e072"; } + +.fa-apple-whole::before { + content: "\f5d1"; } + +.fa-apple-alt::before { + content: "\f5d1"; } + +.fa-kitchen-set::before { + content: "\e51a"; } + +.fa-r::before { + content: "\52"; } + +.fa-temperature-quarter::before { + content: "\f2ca"; } + +.fa-temperature-1::before { + content: "\f2ca"; } + +.fa-thermometer-1::before { + content: "\f2ca"; } + +.fa-thermometer-quarter::before { + content: "\f2ca"; } + +.fa-cube::before { + content: "\f1b2"; } + +.fa-bitcoin-sign::before { + content: "\e0b4"; } + +.fa-shield-dog::before { + content: "\e573"; } + 
+.fa-solar-panel::before { + content: "\f5ba"; } + +.fa-lock-open::before { + content: "\f3c1"; } + +.fa-elevator::before { + content: "\e16d"; } + +.fa-money-bill-transfer::before { + content: "\e528"; } + +.fa-money-bill-trend-up::before { + content: "\e529"; } + +.fa-house-flood-water-circle-arrow-right::before { + content: "\e50f"; } + +.fa-square-poll-horizontal::before { + content: "\f682"; } + +.fa-poll-h::before { + content: "\f682"; } + +.fa-circle::before { + content: "\f111"; } + +.fa-backward-fast::before { + content: "\f049"; } + +.fa-fast-backward::before { + content: "\f049"; } + +.fa-recycle::before { + content: "\f1b8"; } + +.fa-user-astronaut::before { + content: "\f4fb"; } + +.fa-plane-slash::before { + content: "\e069"; } + +.fa-trademark::before { + content: "\f25c"; } + +.fa-basketball::before { + content: "\f434"; } + +.fa-basketball-ball::before { + content: "\f434"; } + +.fa-satellite-dish::before { + content: "\f7c0"; } + +.fa-circle-up::before { + content: "\f35b"; } + +.fa-arrow-alt-circle-up::before { + content: "\f35b"; } + +.fa-mobile-screen-button::before { + content: "\f3cd"; } + +.fa-mobile-alt::before { + content: "\f3cd"; } + +.fa-volume-high::before { + content: "\f028"; } + +.fa-volume-up::before { + content: "\f028"; } + +.fa-users-rays::before { + content: "\e593"; } + +.fa-wallet::before { + content: "\f555"; } + +.fa-clipboard-check::before { + content: "\f46c"; } + +.fa-file-audio::before { + content: "\f1c7"; } + +.fa-burger::before { + content: "\f805"; } + +.fa-hamburger::before { + content: "\f805"; } + +.fa-wrench::before { + content: "\f0ad"; } + +.fa-bugs::before { + content: "\e4d0"; } + +.fa-rupee-sign::before { + content: "\f156"; } + +.fa-rupee::before { + content: "\f156"; } + +.fa-file-image::before { + content: "\f1c5"; } + +.fa-circle-question::before { + content: "\f059"; } + +.fa-question-circle::before { + content: "\f059"; } + +.fa-plane-departure::before { + content: "\f5b0"; } + 
+.fa-handshake-slash::before { + content: "\e060"; } + +.fa-book-bookmark::before { + content: "\e0bb"; } + +.fa-code-branch::before { + content: "\f126"; } + +.fa-hat-cowboy::before { + content: "\f8c0"; } + +.fa-bridge::before { + content: "\e4c8"; } + +.fa-phone-flip::before { + content: "\f879"; } + +.fa-phone-alt::before { + content: "\f879"; } + +.fa-truck-front::before { + content: "\e2b7"; } + +.fa-cat::before { + content: "\f6be"; } + +.fa-anchor-circle-exclamation::before { + content: "\e4ab"; } + +.fa-truck-field::before { + content: "\e58d"; } + +.fa-route::before { + content: "\f4d7"; } + +.fa-clipboard-question::before { + content: "\e4e3"; } + +.fa-panorama::before { + content: "\e209"; } + +.fa-comment-medical::before { + content: "\f7f5"; } + +.fa-teeth-open::before { + content: "\f62f"; } + +.fa-file-circle-minus::before { + content: "\e4ed"; } + +.fa-tags::before { + content: "\f02c"; } + +.fa-wine-glass::before { + content: "\f4e3"; } + +.fa-forward-fast::before { + content: "\f050"; } + +.fa-fast-forward::before { + content: "\f050"; } + +.fa-face-meh-blank::before { + content: "\f5a4"; } + +.fa-meh-blank::before { + content: "\f5a4"; } + +.fa-square-parking::before { + content: "\f540"; } + +.fa-parking::before { + content: "\f540"; } + +.fa-house-signal::before { + content: "\e012"; } + +.fa-bars-progress::before { + content: "\f828"; } + +.fa-tasks-alt::before { + content: "\f828"; } + +.fa-faucet-drip::before { + content: "\e006"; } + +.fa-cart-flatbed::before { + content: "\f474"; } + +.fa-dolly-flatbed::before { + content: "\f474"; } + +.fa-ban-smoking::before { + content: "\f54d"; } + +.fa-smoking-ban::before { + content: "\f54d"; } + +.fa-terminal::before { + content: "\f120"; } + +.fa-mobile-button::before { + content: "\f10b"; } + +.fa-house-medical-flag::before { + content: "\e514"; } + +.fa-basket-shopping::before { + content: "\f291"; } + +.fa-shopping-basket::before { + content: "\f291"; } + +.fa-tape::before { + content: "\f4db"; 
} + +.fa-bus-simple::before { + content: "\f55e"; } + +.fa-bus-alt::before { + content: "\f55e"; } + +.fa-eye::before { + content: "\f06e"; } + +.fa-face-sad-cry::before { + content: "\f5b3"; } + +.fa-sad-cry::before { + content: "\f5b3"; } + +.fa-audio-description::before { + content: "\f29e"; } + +.fa-person-military-to-person::before { + content: "\e54c"; } + +.fa-file-shield::before { + content: "\e4f0"; } + +.fa-user-slash::before { + content: "\f506"; } + +.fa-pen::before { + content: "\f304"; } + +.fa-tower-observation::before { + content: "\e586"; } + +.fa-file-code::before { + content: "\f1c9"; } + +.fa-signal::before { + content: "\f012"; } + +.fa-signal-5::before { + content: "\f012"; } + +.fa-signal-perfect::before { + content: "\f012"; } + +.fa-bus::before { + content: "\f207"; } + +.fa-heart-circle-xmark::before { + content: "\e501"; } + +.fa-house-chimney::before { + content: "\e3af"; } + +.fa-home-lg::before { + content: "\e3af"; } + +.fa-window-maximize::before { + content: "\f2d0"; } + +.fa-face-frown::before { + content: "\f119"; } + +.fa-frown::before { + content: "\f119"; } + +.fa-prescription::before { + content: "\f5b1"; } + +.fa-shop::before { + content: "\f54f"; } + +.fa-store-alt::before { + content: "\f54f"; } + +.fa-floppy-disk::before { + content: "\f0c7"; } + +.fa-save::before { + content: "\f0c7"; } + +.fa-vihara::before { + content: "\f6a7"; } + +.fa-scale-unbalanced::before { + content: "\f515"; } + +.fa-balance-scale-left::before { + content: "\f515"; } + +.fa-sort-up::before { + content: "\f0de"; } + +.fa-sort-asc::before { + content: "\f0de"; } + +.fa-comment-dots::before { + content: "\f4ad"; } + +.fa-commenting::before { + content: "\f4ad"; } + +.fa-plant-wilt::before { + content: "\e5aa"; } + +.fa-diamond::before { + content: "\f219"; } + +.fa-face-grin-squint::before { + content: "\f585"; } + +.fa-grin-squint::before { + content: "\f585"; } + +.fa-hand-holding-dollar::before { + content: "\f4c0"; } + 
+.fa-hand-holding-usd::before { + content: "\f4c0"; } + +.fa-bacterium::before { + content: "\e05a"; } + +.fa-hand-pointer::before { + content: "\f25a"; } + +.fa-drum-steelpan::before { + content: "\f56a"; } + +.fa-hand-scissors::before { + content: "\f257"; } + +.fa-hands-praying::before { + content: "\f684"; } + +.fa-praying-hands::before { + content: "\f684"; } + +.fa-arrow-rotate-right::before { + content: "\f01e"; } + +.fa-arrow-right-rotate::before { + content: "\f01e"; } + +.fa-arrow-rotate-forward::before { + content: "\f01e"; } + +.fa-redo::before { + content: "\f01e"; } + +.fa-biohazard::before { + content: "\f780"; } + +.fa-location-crosshairs::before { + content: "\f601"; } + +.fa-location::before { + content: "\f601"; } + +.fa-mars-double::before { + content: "\f227"; } + +.fa-child-dress::before { + content: "\e59c"; } + +.fa-users-between-lines::before { + content: "\e591"; } + +.fa-lungs-virus::before { + content: "\e067"; } + +.fa-face-grin-tears::before { + content: "\f588"; } + +.fa-grin-tears::before { + content: "\f588"; } + +.fa-phone::before { + content: "\f095"; } + +.fa-calendar-xmark::before { + content: "\f273"; } + +.fa-calendar-times::before { + content: "\f273"; } + +.fa-child-reaching::before { + content: "\e59d"; } + +.fa-head-side-virus::before { + content: "\e064"; } + +.fa-user-gear::before { + content: "\f4fe"; } + +.fa-user-cog::before { + content: "\f4fe"; } + +.fa-arrow-up-1-9::before { + content: "\f163"; } + +.fa-sort-numeric-up::before { + content: "\f163"; } + +.fa-door-closed::before { + content: "\f52a"; } + +.fa-shield-virus::before { + content: "\e06c"; } + +.fa-dice-six::before { + content: "\f526"; } + +.fa-mosquito-net::before { + content: "\e52c"; } + +.fa-bridge-water::before { + content: "\e4ce"; } + +.fa-person-booth::before { + content: "\f756"; } + +.fa-text-width::before { + content: "\f035"; } + +.fa-hat-wizard::before { + content: "\f6e8"; } + +.fa-pen-fancy::before { + content: "\f5ac"; } + 
+.fa-person-digging::before { + content: "\f85e"; } + +.fa-digging::before { + content: "\f85e"; } + +.fa-trash::before { + content: "\f1f8"; } + +.fa-gauge-simple::before { + content: "\f629"; } + +.fa-gauge-simple-med::before { + content: "\f629"; } + +.fa-tachometer-average::before { + content: "\f629"; } + +.fa-book-medical::before { + content: "\f7e6"; } + +.fa-poo::before { + content: "\f2fe"; } + +.fa-quote-right::before { + content: "\f10e"; } + +.fa-quote-right-alt::before { + content: "\f10e"; } + +.fa-shirt::before { + content: "\f553"; } + +.fa-t-shirt::before { + content: "\f553"; } + +.fa-tshirt::before { + content: "\f553"; } + +.fa-cubes::before { + content: "\f1b3"; } + +.fa-divide::before { + content: "\f529"; } + +.fa-tenge-sign::before { + content: "\f7d7"; } + +.fa-tenge::before { + content: "\f7d7"; } + +.fa-headphones::before { + content: "\f025"; } + +.fa-hands-holding::before { + content: "\f4c2"; } + +.fa-hands-clapping::before { + content: "\e1a8"; } + +.fa-republican::before { + content: "\f75e"; } + +.fa-arrow-left::before { + content: "\f060"; } + +.fa-person-circle-xmark::before { + content: "\e543"; } + +.fa-ruler::before { + content: "\f545"; } + +.fa-align-left::before { + content: "\f036"; } + +.fa-dice-d6::before { + content: "\f6d1"; } + +.fa-restroom::before { + content: "\f7bd"; } + +.fa-j::before { + content: "\4a"; } + +.fa-users-viewfinder::before { + content: "\e595"; } + +.fa-file-video::before { + content: "\f1c8"; } + +.fa-up-right-from-square::before { + content: "\f35d"; } + +.fa-external-link-alt::before { + content: "\f35d"; } + +.fa-table-cells::before { + content: "\f00a"; } + +.fa-th::before { + content: "\f00a"; } + +.fa-file-pdf::before { + content: "\f1c1"; } + +.fa-book-bible::before { + content: "\f647"; } + +.fa-bible::before { + content: "\f647"; } + +.fa-o::before { + content: "\4f"; } + +.fa-suitcase-medical::before { + content: "\f0fa"; } + +.fa-medkit::before { + content: "\f0fa"; } + 
+.fa-user-secret::before { + content: "\f21b"; } + +.fa-otter::before { + content: "\f700"; } + +.fa-person-dress::before { + content: "\f182"; } + +.fa-female::before { + content: "\f182"; } + +.fa-comment-dollar::before { + content: "\f651"; } + +.fa-business-time::before { + content: "\f64a"; } + +.fa-briefcase-clock::before { + content: "\f64a"; } + +.fa-table-cells-large::before { + content: "\f009"; } + +.fa-th-large::before { + content: "\f009"; } + +.fa-book-tanakh::before { + content: "\f827"; } + +.fa-tanakh::before { + content: "\f827"; } + +.fa-phone-volume::before { + content: "\f2a0"; } + +.fa-volume-control-phone::before { + content: "\f2a0"; } + +.fa-hat-cowboy-side::before { + content: "\f8c1"; } + +.fa-clipboard-user::before { + content: "\f7f3"; } + +.fa-child::before { + content: "\f1ae"; } + +.fa-lira-sign::before { + content: "\f195"; } + +.fa-satellite::before { + content: "\f7bf"; } + +.fa-plane-lock::before { + content: "\e558"; } + +.fa-tag::before { + content: "\f02b"; } + +.fa-comment::before { + content: "\f075"; } + +.fa-cake-candles::before { + content: "\f1fd"; } + +.fa-birthday-cake::before { + content: "\f1fd"; } + +.fa-cake::before { + content: "\f1fd"; } + +.fa-envelope::before { + content: "\f0e0"; } + +.fa-angles-up::before { + content: "\f102"; } + +.fa-angle-double-up::before { + content: "\f102"; } + +.fa-paperclip::before { + content: "\f0c6"; } + +.fa-arrow-right-to-city::before { + content: "\e4b3"; } + +.fa-ribbon::before { + content: "\f4d6"; } + +.fa-lungs::before { + content: "\f604"; } + +.fa-arrow-up-9-1::before { + content: "\f887"; } + +.fa-sort-numeric-up-alt::before { + content: "\f887"; } + +.fa-litecoin-sign::before { + content: "\e1d3"; } + +.fa-border-none::before { + content: "\f850"; } + +.fa-circle-nodes::before { + content: "\e4e2"; } + +.fa-parachute-box::before { + content: "\f4cd"; } + +.fa-indent::before { + content: "\f03c"; } + +.fa-truck-field-un::before { + content: "\e58e"; } + 
+.fa-hourglass::before { + content: "\f254"; } + +.fa-hourglass-empty::before { + content: "\f254"; } + +.fa-mountain::before { + content: "\f6fc"; } + +.fa-user-doctor::before { + content: "\f0f0"; } + +.fa-user-md::before { + content: "\f0f0"; } + +.fa-circle-info::before { + content: "\f05a"; } + +.fa-info-circle::before { + content: "\f05a"; } + +.fa-cloud-meatball::before { + content: "\f73b"; } + +.fa-camera::before { + content: "\f030"; } + +.fa-camera-alt::before { + content: "\f030"; } + +.fa-square-virus::before { + content: "\e578"; } + +.fa-meteor::before { + content: "\f753"; } + +.fa-car-on::before { + content: "\e4dd"; } + +.fa-sleigh::before { + content: "\f7cc"; } + +.fa-arrow-down-1-9::before { + content: "\f162"; } + +.fa-sort-numeric-asc::before { + content: "\f162"; } + +.fa-sort-numeric-down::before { + content: "\f162"; } + +.fa-hand-holding-droplet::before { + content: "\f4c1"; } + +.fa-hand-holding-water::before { + content: "\f4c1"; } + +.fa-water::before { + content: "\f773"; } + +.fa-calendar-check::before { + content: "\f274"; } + +.fa-braille::before { + content: "\f2a1"; } + +.fa-prescription-bottle-medical::before { + content: "\f486"; } + +.fa-prescription-bottle-alt::before { + content: "\f486"; } + +.fa-landmark::before { + content: "\f66f"; } + +.fa-truck::before { + content: "\f0d1"; } + +.fa-crosshairs::before { + content: "\f05b"; } + +.fa-person-cane::before { + content: "\e53c"; } + +.fa-tent::before { + content: "\e57d"; } + +.fa-vest-patches::before { + content: "\e086"; } + +.fa-check-double::before { + content: "\f560"; } + +.fa-arrow-down-a-z::before { + content: "\f15d"; } + +.fa-sort-alpha-asc::before { + content: "\f15d"; } + +.fa-sort-alpha-down::before { + content: "\f15d"; } + +.fa-money-bill-wheat::before { + content: "\e52a"; } + +.fa-cookie::before { + content: "\f563"; } + +.fa-arrow-rotate-left::before { + content: "\f0e2"; } + +.fa-arrow-left-rotate::before { + content: "\f0e2"; } + 
+.fa-arrow-rotate-back::before { + content: "\f0e2"; } + +.fa-arrow-rotate-backward::before { + content: "\f0e2"; } + +.fa-undo::before { + content: "\f0e2"; } + +.fa-hard-drive::before { + content: "\f0a0"; } + +.fa-hdd::before { + content: "\f0a0"; } + +.fa-face-grin-squint-tears::before { + content: "\f586"; } + +.fa-grin-squint-tears::before { + content: "\f586"; } + +.fa-dumbbell::before { + content: "\f44b"; } + +.fa-rectangle-list::before { + content: "\f022"; } + +.fa-list-alt::before { + content: "\f022"; } + +.fa-tarp-droplet::before { + content: "\e57c"; } + +.fa-house-medical-circle-check::before { + content: "\e511"; } + +.fa-person-skiing-nordic::before { + content: "\f7ca"; } + +.fa-skiing-nordic::before { + content: "\f7ca"; } + +.fa-calendar-plus::before { + content: "\f271"; } + +.fa-plane-arrival::before { + content: "\f5af"; } + +.fa-circle-left::before { + content: "\f359"; } + +.fa-arrow-alt-circle-left::before { + content: "\f359"; } + +.fa-train-subway::before { + content: "\f239"; } + +.fa-subway::before { + content: "\f239"; } + +.fa-chart-gantt::before { + content: "\e0e4"; } + +.fa-indian-rupee-sign::before { + content: "\e1bc"; } + +.fa-indian-rupee::before { + content: "\e1bc"; } + +.fa-inr::before { + content: "\e1bc"; } + +.fa-crop-simple::before { + content: "\f565"; } + +.fa-crop-alt::before { + content: "\f565"; } + +.fa-money-bill-1::before { + content: "\f3d1"; } + +.fa-money-bill-alt::before { + content: "\f3d1"; } + +.fa-left-long::before { + content: "\f30a"; } + +.fa-long-arrow-alt-left::before { + content: "\f30a"; } + +.fa-dna::before { + content: "\f471"; } + +.fa-virus-slash::before { + content: "\e075"; } + +.fa-minus::before { + content: "\f068"; } + +.fa-subtract::before { + content: "\f068"; } + +.fa-chess::before { + content: "\f439"; } + +.fa-arrow-left-long::before { + content: "\f177"; } + +.fa-long-arrow-left::before { + content: "\f177"; } + +.fa-plug-circle-check::before { + content: "\e55c"; } + 
+.fa-street-view::before { + content: "\f21d"; } + +.fa-franc-sign::before { + content: "\e18f"; } + +.fa-volume-off::before { + content: "\f026"; } + +.fa-hands-asl-interpreting::before { + content: "\f2a3"; } + +.fa-american-sign-language-interpreting::before { + content: "\f2a3"; } + +.fa-asl-interpreting::before { + content: "\f2a3"; } + +.fa-hands-american-sign-language-interpreting::before { + content: "\f2a3"; } + +.fa-gear::before { + content: "\f013"; } + +.fa-cog::before { + content: "\f013"; } + +.fa-droplet-slash::before { + content: "\f5c7"; } + +.fa-tint-slash::before { + content: "\f5c7"; } + +.fa-mosque::before { + content: "\f678"; } + +.fa-mosquito::before { + content: "\e52b"; } + +.fa-star-of-david::before { + content: "\f69a"; } + +.fa-person-military-rifle::before { + content: "\e54b"; } + +.fa-cart-shopping::before { + content: "\f07a"; } + +.fa-shopping-cart::before { + content: "\f07a"; } + +.fa-vials::before { + content: "\f493"; } + +.fa-plug-circle-plus::before { + content: "\e55f"; } + +.fa-place-of-worship::before { + content: "\f67f"; } + +.fa-grip-vertical::before { + content: "\f58e"; } + +.fa-arrow-turn-up::before { + content: "\f148"; } + +.fa-level-up::before { + content: "\f148"; } + +.fa-u::before { + content: "\55"; } + +.fa-square-root-variable::before { + content: "\f698"; } + +.fa-square-root-alt::before { + content: "\f698"; } + +.fa-clock::before { + content: "\f017"; } + +.fa-clock-four::before { + content: "\f017"; } + +.fa-backward-step::before { + content: "\f048"; } + +.fa-step-backward::before { + content: "\f048"; } + +.fa-pallet::before { + content: "\f482"; } + +.fa-faucet::before { + content: "\e005"; } + +.fa-baseball-bat-ball::before { + content: "\f432"; } + +.fa-s::before { + content: "\53"; } + +.fa-timeline::before { + content: "\e29c"; } + +.fa-keyboard::before { + content: "\f11c"; } + +.fa-caret-down::before { + content: "\f0d7"; } + +.fa-house-chimney-medical::before { + content: "\f7f2"; } + 
+.fa-clinic-medical::before { + content: "\f7f2"; } + +.fa-temperature-three-quarters::before { + content: "\f2c8"; } + +.fa-temperature-3::before { + content: "\f2c8"; } + +.fa-thermometer-3::before { + content: "\f2c8"; } + +.fa-thermometer-three-quarters::before { + content: "\f2c8"; } + +.fa-mobile-screen::before { + content: "\f3cf"; } + +.fa-mobile-android-alt::before { + content: "\f3cf"; } + +.fa-plane-up::before { + content: "\e22d"; } + +.fa-piggy-bank::before { + content: "\f4d3"; } + +.fa-battery-half::before { + content: "\f242"; } + +.fa-battery-3::before { + content: "\f242"; } + +.fa-mountain-city::before { + content: "\e52e"; } + +.fa-coins::before { + content: "\f51e"; } + +.fa-khanda::before { + content: "\f66d"; } + +.fa-sliders::before { + content: "\f1de"; } + +.fa-sliders-h::before { + content: "\f1de"; } + +.fa-folder-tree::before { + content: "\f802"; } + +.fa-network-wired::before { + content: "\f6ff"; } + +.fa-map-pin::before { + content: "\f276"; } + +.fa-hamsa::before { + content: "\f665"; } + +.fa-cent-sign::before { + content: "\e3f5"; } + +.fa-flask::before { + content: "\f0c3"; } + +.fa-person-pregnant::before { + content: "\e31e"; } + +.fa-wand-sparkles::before { + content: "\f72b"; } + +.fa-ellipsis-vertical::before { + content: "\f142"; } + +.fa-ellipsis-v::before { + content: "\f142"; } + +.fa-ticket::before { + content: "\f145"; } + +.fa-power-off::before { + content: "\f011"; } + +.fa-right-long::before { + content: "\f30b"; } + +.fa-long-arrow-alt-right::before { + content: "\f30b"; } + +.fa-flag-usa::before { + content: "\f74d"; } + +.fa-laptop-file::before { + content: "\e51d"; } + +.fa-tty::before { + content: "\f1e4"; } + +.fa-teletype::before { + content: "\f1e4"; } + +.fa-diagram-next::before { + content: "\e476"; } + +.fa-person-rifle::before { + content: "\e54e"; } + +.fa-house-medical-circle-exclamation::before { + content: "\e512"; } + +.fa-closed-captioning::before { + content: "\f20a"; } + 
+.fa-person-hiking::before { + content: "\f6ec"; } + +.fa-hiking::before { + content: "\f6ec"; } + +.fa-venus-double::before { + content: "\f226"; } + +.fa-images::before { + content: "\f302"; } + +.fa-calculator::before { + content: "\f1ec"; } + +.fa-people-pulling::before { + content: "\e535"; } + +.fa-n::before { + content: "\4e"; } + +.fa-cable-car::before { + content: "\f7da"; } + +.fa-tram::before { + content: "\f7da"; } + +.fa-cloud-rain::before { + content: "\f73d"; } + +.fa-building-circle-xmark::before { + content: "\e4d4"; } + +.fa-ship::before { + content: "\f21a"; } + +.fa-arrows-down-to-line::before { + content: "\e4b8"; } + +.fa-download::before { + content: "\f019"; } + +.fa-face-grin::before { + content: "\f580"; } + +.fa-grin::before { + content: "\f580"; } + +.fa-delete-left::before { + content: "\f55a"; } + +.fa-backspace::before { + content: "\f55a"; } + +.fa-eye-dropper::before { + content: "\f1fb"; } + +.fa-eye-dropper-empty::before { + content: "\f1fb"; } + +.fa-eyedropper::before { + content: "\f1fb"; } + +.fa-file-circle-check::before { + content: "\e5a0"; } + +.fa-forward::before { + content: "\f04e"; } + +.fa-mobile::before { + content: "\f3ce"; } + +.fa-mobile-android::before { + content: "\f3ce"; } + +.fa-mobile-phone::before { + content: "\f3ce"; } + +.fa-face-meh::before { + content: "\f11a"; } + +.fa-meh::before { + content: "\f11a"; } + +.fa-align-center::before { + content: "\f037"; } + +.fa-book-skull::before { + content: "\f6b7"; } + +.fa-book-dead::before { + content: "\f6b7"; } + +.fa-id-card::before { + content: "\f2c2"; } + +.fa-drivers-license::before { + content: "\f2c2"; } + +.fa-outdent::before { + content: "\f03b"; } + +.fa-dedent::before { + content: "\f03b"; } + +.fa-heart-circle-exclamation::before { + content: "\e4fe"; } + +.fa-house::before { + content: "\f015"; } + +.fa-home::before { + content: "\f015"; } + +.fa-home-alt::before { + content: "\f015"; } + +.fa-home-lg-alt::before { + content: "\f015"; } + 
+.fa-calendar-week::before { + content: "\f784"; } + +.fa-laptop-medical::before { + content: "\f812"; } + +.fa-b::before { + content: "\42"; } + +.fa-file-medical::before { + content: "\f477"; } + +.fa-dice-one::before { + content: "\f525"; } + +.fa-kiwi-bird::before { + content: "\f535"; } + +.fa-arrow-right-arrow-left::before { + content: "\f0ec"; } + +.fa-exchange::before { + content: "\f0ec"; } + +.fa-rotate-right::before { + content: "\f2f9"; } + +.fa-redo-alt::before { + content: "\f2f9"; } + +.fa-rotate-forward::before { + content: "\f2f9"; } + +.fa-utensils::before { + content: "\f2e7"; } + +.fa-cutlery::before { + content: "\f2e7"; } + +.fa-arrow-up-wide-short::before { + content: "\f161"; } + +.fa-sort-amount-up::before { + content: "\f161"; } + +.fa-mill-sign::before { + content: "\e1ed"; } + +.fa-bowl-rice::before { + content: "\e2eb"; } + +.fa-skull::before { + content: "\f54c"; } + +.fa-tower-broadcast::before { + content: "\f519"; } + +.fa-broadcast-tower::before { + content: "\f519"; } + +.fa-truck-pickup::before { + content: "\f63c"; } + +.fa-up-long::before { + content: "\f30c"; } + +.fa-long-arrow-alt-up::before { + content: "\f30c"; } + +.fa-stop::before { + content: "\f04d"; } + +.fa-code-merge::before { + content: "\f387"; } + +.fa-upload::before { + content: "\f093"; } + +.fa-hurricane::before { + content: "\f751"; } + +.fa-mound::before { + content: "\e52d"; } + +.fa-toilet-portable::before { + content: "\e583"; } + +.fa-compact-disc::before { + content: "\f51f"; } + +.fa-file-arrow-down::before { + content: "\f56d"; } + +.fa-file-download::before { + content: "\f56d"; } + +.fa-caravan::before { + content: "\f8ff"; } + +.fa-shield-cat::before { + content: "\e572"; } + +.fa-bolt::before { + content: "\f0e7"; } + +.fa-zap::before { + content: "\f0e7"; } + +.fa-glass-water::before { + content: "\e4f4"; } + +.fa-oil-well::before { + content: "\e532"; } + +.fa-vault::before { + content: "\e2c5"; } + +.fa-mars::before { + content: "\f222"; } + 
+.fa-toilet::before { + content: "\f7d8"; } + +.fa-plane-circle-xmark::before { + content: "\e557"; } + +.fa-yen-sign::before { + content: "\f157"; } + +.fa-cny::before { + content: "\f157"; } + +.fa-jpy::before { + content: "\f157"; } + +.fa-rmb::before { + content: "\f157"; } + +.fa-yen::before { + content: "\f157"; } + +.fa-ruble-sign::before { + content: "\f158"; } + +.fa-rouble::before { + content: "\f158"; } + +.fa-rub::before { + content: "\f158"; } + +.fa-ruble::before { + content: "\f158"; } + +.fa-sun::before { + content: "\f185"; } + +.fa-guitar::before { + content: "\f7a6"; } + +.fa-face-laugh-wink::before { + content: "\f59c"; } + +.fa-laugh-wink::before { + content: "\f59c"; } + +.fa-horse-head::before { + content: "\f7ab"; } + +.fa-bore-hole::before { + content: "\e4c3"; } + +.fa-industry::before { + content: "\f275"; } + +.fa-circle-down::before { + content: "\f358"; } + +.fa-arrow-alt-circle-down::before { + content: "\f358"; } + +.fa-arrows-turn-to-dots::before { + content: "\e4c1"; } + +.fa-florin-sign::before { + content: "\e184"; } + +.fa-arrow-down-short-wide::before { + content: "\f884"; } + +.fa-sort-amount-desc::before { + content: "\f884"; } + +.fa-sort-amount-down-alt::before { + content: "\f884"; } + +.fa-less-than::before { + content: "\3c"; } + +.fa-angle-down::before { + content: "\f107"; } + +.fa-car-tunnel::before { + content: "\e4de"; } + +.fa-head-side-cough::before { + content: "\e061"; } + +.fa-grip-lines::before { + content: "\f7a4"; } + +.fa-thumbs-down::before { + content: "\f165"; } + +.fa-user-lock::before { + content: "\f502"; } + +.fa-arrow-right-long::before { + content: "\f178"; } + +.fa-long-arrow-right::before { + content: "\f178"; } + +.fa-anchor-circle-xmark::before { + content: "\e4ac"; } + +.fa-ellipsis::before { + content: "\f141"; } + +.fa-ellipsis-h::before { + content: "\f141"; } + +.fa-chess-pawn::before { + content: "\f443"; } + +.fa-kit-medical::before { + content: "\f479"; } + +.fa-first-aid::before { + 
content: "\f479"; } + +.fa-person-through-window::before { + content: "\e5a9"; } + +.fa-toolbox::before { + content: "\f552"; } + +.fa-hands-holding-circle::before { + content: "\e4fb"; } + +.fa-bug::before { + content: "\f188"; } + +.fa-credit-card::before { + content: "\f09d"; } + +.fa-credit-card-alt::before { + content: "\f09d"; } + +.fa-car::before { + content: "\f1b9"; } + +.fa-automobile::before { + content: "\f1b9"; } + +.fa-hand-holding-hand::before { + content: "\e4f7"; } + +.fa-book-open-reader::before { + content: "\f5da"; } + +.fa-book-reader::before { + content: "\f5da"; } + +.fa-mountain-sun::before { + content: "\e52f"; } + +.fa-arrows-left-right-to-line::before { + content: "\e4ba"; } + +.fa-dice-d20::before { + content: "\f6cf"; } + +.fa-truck-droplet::before { + content: "\e58c"; } + +.fa-file-circle-xmark::before { + content: "\e5a1"; } + +.fa-temperature-arrow-up::before { + content: "\e040"; } + +.fa-temperature-up::before { + content: "\e040"; } + +.fa-medal::before { + content: "\f5a2"; } + +.fa-bed::before { + content: "\f236"; } + +.fa-square-h::before { + content: "\f0fd"; } + +.fa-h-square::before { + content: "\f0fd"; } + +.fa-podcast::before { + content: "\f2ce"; } + +.fa-temperature-full::before { + content: "\f2c7"; } + +.fa-temperature-4::before { + content: "\f2c7"; } + +.fa-thermometer-4::before { + content: "\f2c7"; } + +.fa-thermometer-full::before { + content: "\f2c7"; } + +.fa-bell::before { + content: "\f0f3"; } + +.fa-superscript::before { + content: "\f12b"; } + +.fa-plug-circle-xmark::before { + content: "\e560"; } + +.fa-star-of-life::before { + content: "\f621"; } + +.fa-phone-slash::before { + content: "\f3dd"; } + +.fa-paint-roller::before { + content: "\f5aa"; } + +.fa-handshake-angle::before { + content: "\f4c4"; } + +.fa-hands-helping::before { + content: "\f4c4"; } + +.fa-location-dot::before { + content: "\f3c5"; } + +.fa-map-marker-alt::before { + content: "\f3c5"; } + +.fa-file::before { + content: "\f15b"; } + 
+.fa-greater-than::before { + content: "\3e"; } + +.fa-person-swimming::before { + content: "\f5c4"; } + +.fa-swimmer::before { + content: "\f5c4"; } + +.fa-arrow-down::before { + content: "\f063"; } + +.fa-droplet::before { + content: "\f043"; } + +.fa-tint::before { + content: "\f043"; } + +.fa-eraser::before { + content: "\f12d"; } + +.fa-earth-americas::before { + content: "\f57d"; } + +.fa-earth::before { + content: "\f57d"; } + +.fa-earth-america::before { + content: "\f57d"; } + +.fa-globe-americas::before { + content: "\f57d"; } + +.fa-person-burst::before { + content: "\e53b"; } + +.fa-dove::before { + content: "\f4ba"; } + +.fa-battery-empty::before { + content: "\f244"; } + +.fa-battery-0::before { + content: "\f244"; } + +.fa-socks::before { + content: "\f696"; } + +.fa-inbox::before { + content: "\f01c"; } + +.fa-section::before { + content: "\e447"; } + +.fa-gauge-high::before { + content: "\f625"; } + +.fa-tachometer-alt::before { + content: "\f625"; } + +.fa-tachometer-alt-fast::before { + content: "\f625"; } + +.fa-envelope-open-text::before { + content: "\f658"; } + +.fa-hospital::before { + content: "\f0f8"; } + +.fa-hospital-alt::before { + content: "\f0f8"; } + +.fa-hospital-wide::before { + content: "\f0f8"; } + +.fa-wine-bottle::before { + content: "\f72f"; } + +.fa-chess-rook::before { + content: "\f447"; } + +.fa-bars-staggered::before { + content: "\f550"; } + +.fa-reorder::before { + content: "\f550"; } + +.fa-stream::before { + content: "\f550"; } + +.fa-dharmachakra::before { + content: "\f655"; } + +.fa-hotdog::before { + content: "\f80f"; } + +.fa-person-walking-with-cane::before { + content: "\f29d"; } + +.fa-blind::before { + content: "\f29d"; } + +.fa-drum::before { + content: "\f569"; } + +.fa-ice-cream::before { + content: "\f810"; } + +.fa-heart-circle-bolt::before { + content: "\e4fc"; } + +.fa-fax::before { + content: "\f1ac"; } + +.fa-paragraph::before { + content: "\f1dd"; } + +.fa-check-to-slot::before { + content: "\f772"; 
} + +.fa-vote-yea::before { + content: "\f772"; } + +.fa-star-half::before { + content: "\f089"; } + +.fa-boxes-stacked::before { + content: "\f468"; } + +.fa-boxes::before { + content: "\f468"; } + +.fa-boxes-alt::before { + content: "\f468"; } + +.fa-link::before { + content: "\f0c1"; } + +.fa-chain::before { + content: "\f0c1"; } + +.fa-ear-listen::before { + content: "\f2a2"; } + +.fa-assistive-listening-systems::before { + content: "\f2a2"; } + +.fa-tree-city::before { + content: "\e587"; } + +.fa-play::before { + content: "\f04b"; } + +.fa-font::before { + content: "\f031"; } + +.fa-table-cells-row-lock::before { + content: "\e67a"; } + +.fa-rupiah-sign::before { + content: "\e23d"; } + +.fa-magnifying-glass::before { + content: "\f002"; } + +.fa-search::before { + content: "\f002"; } + +.fa-table-tennis-paddle-ball::before { + content: "\f45d"; } + +.fa-ping-pong-paddle-ball::before { + content: "\f45d"; } + +.fa-table-tennis::before { + content: "\f45d"; } + +.fa-person-dots-from-line::before { + content: "\f470"; } + +.fa-diagnoses::before { + content: "\f470"; } + +.fa-trash-can-arrow-up::before { + content: "\f82a"; } + +.fa-trash-restore-alt::before { + content: "\f82a"; } + +.fa-naira-sign::before { + content: "\e1f6"; } + +.fa-cart-arrow-down::before { + content: "\f218"; } + +.fa-walkie-talkie::before { + content: "\f8ef"; } + +.fa-file-pen::before { + content: "\f31c"; } + +.fa-file-edit::before { + content: "\f31c"; } + +.fa-receipt::before { + content: "\f543"; } + +.fa-square-pen::before { + content: "\f14b"; } + +.fa-pen-square::before { + content: "\f14b"; } + +.fa-pencil-square::before { + content: "\f14b"; } + +.fa-suitcase-rolling::before { + content: "\f5c1"; } + +.fa-person-circle-exclamation::before { + content: "\e53f"; } + +.fa-chevron-down::before { + content: "\f078"; } + +.fa-battery-full::before { + content: "\f240"; } + +.fa-battery::before { + content: "\f240"; } + +.fa-battery-5::before { + content: "\f240"; } + 
+.fa-skull-crossbones::before { + content: "\f714"; } + +.fa-code-compare::before { + content: "\e13a"; } + +.fa-list-ul::before { + content: "\f0ca"; } + +.fa-list-dots::before { + content: "\f0ca"; } + +.fa-school-lock::before { + content: "\e56f"; } + +.fa-tower-cell::before { + content: "\e585"; } + +.fa-down-long::before { + content: "\f309"; } + +.fa-long-arrow-alt-down::before { + content: "\f309"; } + +.fa-ranking-star::before { + content: "\e561"; } + +.fa-chess-king::before { + content: "\f43f"; } + +.fa-person-harassing::before { + content: "\e549"; } + +.fa-brazilian-real-sign::before { + content: "\e46c"; } + +.fa-landmark-dome::before { + content: "\f752"; } + +.fa-landmark-alt::before { + content: "\f752"; } + +.fa-arrow-up::before { + content: "\f062"; } + +.fa-tv::before { + content: "\f26c"; } + +.fa-television::before { + content: "\f26c"; } + +.fa-tv-alt::before { + content: "\f26c"; } + +.fa-shrimp::before { + content: "\e448"; } + +.fa-list-check::before { + content: "\f0ae"; } + +.fa-tasks::before { + content: "\f0ae"; } + +.fa-jug-detergent::before { + content: "\e519"; } + +.fa-circle-user::before { + content: "\f2bd"; } + +.fa-user-circle::before { + content: "\f2bd"; } + +.fa-user-shield::before { + content: "\f505"; } + +.fa-wind::before { + content: "\f72e"; } + +.fa-car-burst::before { + content: "\f5e1"; } + +.fa-car-crash::before { + content: "\f5e1"; } + +.fa-y::before { + content: "\59"; } + +.fa-person-snowboarding::before { + content: "\f7ce"; } + +.fa-snowboarding::before { + content: "\f7ce"; } + +.fa-truck-fast::before { + content: "\f48b"; } + +.fa-shipping-fast::before { + content: "\f48b"; } + +.fa-fish::before { + content: "\f578"; } + +.fa-user-graduate::before { + content: "\f501"; } + +.fa-circle-half-stroke::before { + content: "\f042"; } + +.fa-adjust::before { + content: "\f042"; } + +.fa-clapperboard::before { + content: "\e131"; } + +.fa-circle-radiation::before { + content: "\f7ba"; } + +.fa-radiation-alt::before 
{ + content: "\f7ba"; } + +.fa-baseball::before { + content: "\f433"; } + +.fa-baseball-ball::before { + content: "\f433"; } + +.fa-jet-fighter-up::before { + content: "\e518"; } + +.fa-diagram-project::before { + content: "\f542"; } + +.fa-project-diagram::before { + content: "\f542"; } + +.fa-copy::before { + content: "\f0c5"; } + +.fa-volume-xmark::before { + content: "\f6a9"; } + +.fa-volume-mute::before { + content: "\f6a9"; } + +.fa-volume-times::before { + content: "\f6a9"; } + +.fa-hand-sparkles::before { + content: "\e05d"; } + +.fa-grip::before { + content: "\f58d"; } + +.fa-grip-horizontal::before { + content: "\f58d"; } + +.fa-share-from-square::before { + content: "\f14d"; } + +.fa-share-square::before { + content: "\f14d"; } + +.fa-child-combatant::before { + content: "\e4e0"; } + +.fa-child-rifle::before { + content: "\e4e0"; } + +.fa-gun::before { + content: "\e19b"; } + +.fa-square-phone::before { + content: "\f098"; } + +.fa-phone-square::before { + content: "\f098"; } + +.fa-plus::before { + content: "\2b"; } + +.fa-add::before { + content: "\2b"; } + +.fa-expand::before { + content: "\f065"; } + +.fa-computer::before { + content: "\e4e5"; } + +.fa-xmark::before { + content: "\f00d"; } + +.fa-close::before { + content: "\f00d"; } + +.fa-multiply::before { + content: "\f00d"; } + +.fa-remove::before { + content: "\f00d"; } + +.fa-times::before { + content: "\f00d"; } + +.fa-arrows-up-down-left-right::before { + content: "\f047"; } + +.fa-arrows::before { + content: "\f047"; } + +.fa-chalkboard-user::before { + content: "\f51c"; } + +.fa-chalkboard-teacher::before { + content: "\f51c"; } + +.fa-peso-sign::before { + content: "\e222"; } + +.fa-building-shield::before { + content: "\e4d8"; } + +.fa-baby::before { + content: "\f77c"; } + +.fa-users-line::before { + content: "\e592"; } + +.fa-quote-left::before { + content: "\f10d"; } + +.fa-quote-left-alt::before { + content: "\f10d"; } + +.fa-tractor::before { + content: "\f722"; } + 
+.fa-trash-arrow-up::before { + content: "\f829"; } + +.fa-trash-restore::before { + content: "\f829"; } + +.fa-arrow-down-up-lock::before { + content: "\e4b0"; } + +.fa-lines-leaning::before { + content: "\e51e"; } + +.fa-ruler-combined::before { + content: "\f546"; } + +.fa-copyright::before { + content: "\f1f9"; } + +.fa-equals::before { + content: "\3d"; } + +.fa-blender::before { + content: "\f517"; } + +.fa-teeth::before { + content: "\f62e"; } + +.fa-shekel-sign::before { + content: "\f20b"; } + +.fa-ils::before { + content: "\f20b"; } + +.fa-shekel::before { + content: "\f20b"; } + +.fa-sheqel::before { + content: "\f20b"; } + +.fa-sheqel-sign::before { + content: "\f20b"; } + +.fa-map::before { + content: "\f279"; } + +.fa-rocket::before { + content: "\f135"; } + +.fa-photo-film::before { + content: "\f87c"; } + +.fa-photo-video::before { + content: "\f87c"; } + +.fa-folder-minus::before { + content: "\f65d"; } + +.fa-store::before { + content: "\f54e"; } + +.fa-arrow-trend-up::before { + content: "\e098"; } + +.fa-plug-circle-minus::before { + content: "\e55e"; } + +.fa-sign-hanging::before { + content: "\f4d9"; } + +.fa-sign::before { + content: "\f4d9"; } + +.fa-bezier-curve::before { + content: "\f55b"; } + +.fa-bell-slash::before { + content: "\f1f6"; } + +.fa-tablet::before { + content: "\f3fb"; } + +.fa-tablet-android::before { + content: "\f3fb"; } + +.fa-school-flag::before { + content: "\e56e"; } + +.fa-fill::before { + content: "\f575"; } + +.fa-angle-up::before { + content: "\f106"; } + +.fa-drumstick-bite::before { + content: "\f6d7"; } + +.fa-holly-berry::before { + content: "\f7aa"; } + +.fa-chevron-left::before { + content: "\f053"; } + +.fa-bacteria::before { + content: "\e059"; } + +.fa-hand-lizard::before { + content: "\f258"; } + +.fa-notdef::before { + content: "\e1fe"; } + +.fa-disease::before { + content: "\f7fa"; } + +.fa-briefcase-medical::before { + content: "\f469"; } + +.fa-genderless::before { + content: "\f22d"; } + 
+.fa-chevron-right::before { + content: "\f054"; } + +.fa-retweet::before { + content: "\f079"; } + +.fa-car-rear::before { + content: "\f5de"; } + +.fa-car-alt::before { + content: "\f5de"; } + +.fa-pump-soap::before { + content: "\e06b"; } + +.fa-video-slash::before { + content: "\f4e2"; } + +.fa-battery-quarter::before { + content: "\f243"; } + +.fa-battery-2::before { + content: "\f243"; } + +.fa-radio::before { + content: "\f8d7"; } + +.fa-baby-carriage::before { + content: "\f77d"; } + +.fa-carriage-baby::before { + content: "\f77d"; } + +.fa-traffic-light::before { + content: "\f637"; } + +.fa-thermometer::before { + content: "\f491"; } + +.fa-vr-cardboard::before { + content: "\f729"; } + +.fa-hand-middle-finger::before { + content: "\f806"; } + +.fa-percent::before { + content: "\25"; } + +.fa-percentage::before { + content: "\25"; } + +.fa-truck-moving::before { + content: "\f4df"; } + +.fa-glass-water-droplet::before { + content: "\e4f5"; } + +.fa-display::before { + content: "\e163"; } + +.fa-face-smile::before { + content: "\f118"; } + +.fa-smile::before { + content: "\f118"; } + +.fa-thumbtack::before { + content: "\f08d"; } + +.fa-thumb-tack::before { + content: "\f08d"; } + +.fa-trophy::before { + content: "\f091"; } + +.fa-person-praying::before { + content: "\f683"; } + +.fa-pray::before { + content: "\f683"; } + +.fa-hammer::before { + content: "\f6e3"; } + +.fa-hand-peace::before { + content: "\f25b"; } + +.fa-rotate::before { + content: "\f2f1"; } + +.fa-sync-alt::before { + content: "\f2f1"; } + +.fa-spinner::before { + content: "\f110"; } + +.fa-robot::before { + content: "\f544"; } + +.fa-peace::before { + content: "\f67c"; } + +.fa-gears::before { + content: "\f085"; } + +.fa-cogs::before { + content: "\f085"; } + +.fa-warehouse::before { + content: "\f494"; } + +.fa-arrow-up-right-dots::before { + content: "\e4b7"; } + +.fa-splotch::before { + content: "\f5bc"; } + +.fa-face-grin-hearts::before { + content: "\f584"; } + 
+.fa-grin-hearts::before { + content: "\f584"; } + +.fa-dice-four::before { + content: "\f524"; } + +.fa-sim-card::before { + content: "\f7c4"; } + +.fa-transgender::before { + content: "\f225"; } + +.fa-transgender-alt::before { + content: "\f225"; } + +.fa-mercury::before { + content: "\f223"; } + +.fa-arrow-turn-down::before { + content: "\f149"; } + +.fa-level-down::before { + content: "\f149"; } + +.fa-person-falling-burst::before { + content: "\e547"; } + +.fa-award::before { + content: "\f559"; } + +.fa-ticket-simple::before { + content: "\f3ff"; } + +.fa-ticket-alt::before { + content: "\f3ff"; } + +.fa-building::before { + content: "\f1ad"; } + +.fa-angles-left::before { + content: "\f100"; } + +.fa-angle-double-left::before { + content: "\f100"; } + +.fa-qrcode::before { + content: "\f029"; } + +.fa-clock-rotate-left::before { + content: "\f1da"; } + +.fa-history::before { + content: "\f1da"; } + +.fa-face-grin-beam-sweat::before { + content: "\f583"; } + +.fa-grin-beam-sweat::before { + content: "\f583"; } + +.fa-file-export::before { + content: "\f56e"; } + +.fa-arrow-right-from-file::before { + content: "\f56e"; } + +.fa-shield::before { + content: "\f132"; } + +.fa-shield-blank::before { + content: "\f132"; } + +.fa-arrow-up-short-wide::before { + content: "\f885"; } + +.fa-sort-amount-up-alt::before { + content: "\f885"; } + +.fa-house-medical::before { + content: "\e3b2"; } + +.fa-golf-ball-tee::before { + content: "\f450"; } + +.fa-golf-ball::before { + content: "\f450"; } + +.fa-circle-chevron-left::before { + content: "\f137"; } + +.fa-chevron-circle-left::before { + content: "\f137"; } + +.fa-house-chimney-window::before { + content: "\e00d"; } + +.fa-pen-nib::before { + content: "\f5ad"; } + +.fa-tent-arrow-turn-left::before { + content: "\e580"; } + +.fa-tents::before { + content: "\e582"; } + +.fa-wand-magic::before { + content: "\f0d0"; } + +.fa-magic::before { + content: "\f0d0"; } + +.fa-dog::before { + content: "\f6d3"; } + 
+.fa-carrot::before { + content: "\f787"; } + +.fa-moon::before { + content: "\f186"; } + +.fa-wine-glass-empty::before { + content: "\f5ce"; } + +.fa-wine-glass-alt::before { + content: "\f5ce"; } + +.fa-cheese::before { + content: "\f7ef"; } + +.fa-yin-yang::before { + content: "\f6ad"; } + +.fa-music::before { + content: "\f001"; } + +.fa-code-commit::before { + content: "\f386"; } + +.fa-temperature-low::before { + content: "\f76b"; } + +.fa-person-biking::before { + content: "\f84a"; } + +.fa-biking::before { + content: "\f84a"; } + +.fa-broom::before { + content: "\f51a"; } + +.fa-shield-heart::before { + content: "\e574"; } + +.fa-gopuram::before { + content: "\f664"; } + +.fa-earth-oceania::before { + content: "\e47b"; } + +.fa-globe-oceania::before { + content: "\e47b"; } + +.fa-square-xmark::before { + content: "\f2d3"; } + +.fa-times-square::before { + content: "\f2d3"; } + +.fa-xmark-square::before { + content: "\f2d3"; } + +.fa-hashtag::before { + content: "\23"; } + +.fa-up-right-and-down-left-from-center::before { + content: "\f424"; } + +.fa-expand-alt::before { + content: "\f424"; } + +.fa-oil-can::before { + content: "\f613"; } + +.fa-t::before { + content: "\54"; } + +.fa-hippo::before { + content: "\f6ed"; } + +.fa-chart-column::before { + content: "\e0e3"; } + +.fa-infinity::before { + content: "\f534"; } + +.fa-vial-circle-check::before { + content: "\e596"; } + +.fa-person-arrow-down-to-line::before { + content: "\e538"; } + +.fa-voicemail::before { + content: "\f897"; } + +.fa-fan::before { + content: "\f863"; } + +.fa-person-walking-luggage::before { + content: "\e554"; } + +.fa-up-down::before { + content: "\f338"; } + +.fa-arrows-alt-v::before { + content: "\f338"; } + +.fa-cloud-moon-rain::before { + content: "\f73c"; } + +.fa-calendar::before { + content: "\f133"; } + +.fa-trailer::before { + content: "\e041"; } + +.fa-bahai::before { + content: "\f666"; } + +.fa-haykal::before { + content: "\f666"; } + +.fa-sd-card::before { + content: 
"\f7c2"; } + +.fa-dragon::before { + content: "\f6d5"; } + +.fa-shoe-prints::before { + content: "\f54b"; } + +.fa-circle-plus::before { + content: "\f055"; } + +.fa-plus-circle::before { + content: "\f055"; } + +.fa-face-grin-tongue-wink::before { + content: "\f58b"; } + +.fa-grin-tongue-wink::before { + content: "\f58b"; } + +.fa-hand-holding::before { + content: "\f4bd"; } + +.fa-plug-circle-exclamation::before { + content: "\e55d"; } + +.fa-link-slash::before { + content: "\f127"; } + +.fa-chain-broken::before { + content: "\f127"; } + +.fa-chain-slash::before { + content: "\f127"; } + +.fa-unlink::before { + content: "\f127"; } + +.fa-clone::before { + content: "\f24d"; } + +.fa-person-walking-arrow-loop-left::before { + content: "\e551"; } + +.fa-arrow-up-z-a::before { + content: "\f882"; } + +.fa-sort-alpha-up-alt::before { + content: "\f882"; } + +.fa-fire-flame-curved::before { + content: "\f7e4"; } + +.fa-fire-alt::before { + content: "\f7e4"; } + +.fa-tornado::before { + content: "\f76f"; } + +.fa-file-circle-plus::before { + content: "\e494"; } + +.fa-book-quran::before { + content: "\f687"; } + +.fa-quran::before { + content: "\f687"; } + +.fa-anchor::before { + content: "\f13d"; } + +.fa-border-all::before { + content: "\f84c"; } + +.fa-face-angry::before { + content: "\f556"; } + +.fa-angry::before { + content: "\f556"; } + +.fa-cookie-bite::before { + content: "\f564"; } + +.fa-arrow-trend-down::before { + content: "\e097"; } + +.fa-rss::before { + content: "\f09e"; } + +.fa-feed::before { + content: "\f09e"; } + +.fa-draw-polygon::before { + content: "\f5ee"; } + +.fa-scale-balanced::before { + content: "\f24e"; } + +.fa-balance-scale::before { + content: "\f24e"; } + +.fa-gauge-simple-high::before { + content: "\f62a"; } + +.fa-tachometer::before { + content: "\f62a"; } + +.fa-tachometer-fast::before { + content: "\f62a"; } + +.fa-shower::before { + content: "\f2cc"; } + +.fa-desktop::before { + content: "\f390"; } + +.fa-desktop-alt::before { + 
content: "\f390"; } + +.fa-m::before { + content: "\4d"; } + +.fa-table-list::before { + content: "\f00b"; } + +.fa-th-list::before { + content: "\f00b"; } + +.fa-comment-sms::before { + content: "\f7cd"; } + +.fa-sms::before { + content: "\f7cd"; } + +.fa-book::before { + content: "\f02d"; } + +.fa-user-plus::before { + content: "\f234"; } + +.fa-check::before { + content: "\f00c"; } + +.fa-battery-three-quarters::before { + content: "\f241"; } + +.fa-battery-4::before { + content: "\f241"; } + +.fa-house-circle-check::before { + content: "\e509"; } + +.fa-angle-left::before { + content: "\f104"; } + +.fa-diagram-successor::before { + content: "\e47a"; } + +.fa-truck-arrow-right::before { + content: "\e58b"; } + +.fa-arrows-split-up-and-left::before { + content: "\e4bc"; } + +.fa-hand-fist::before { + content: "\f6de"; } + +.fa-fist-raised::before { + content: "\f6de"; } + +.fa-cloud-moon::before { + content: "\f6c3"; } + +.fa-briefcase::before { + content: "\f0b1"; } + +.fa-person-falling::before { + content: "\e546"; } + +.fa-image-portrait::before { + content: "\f3e0"; } + +.fa-portrait::before { + content: "\f3e0"; } + +.fa-user-tag::before { + content: "\f507"; } + +.fa-rug::before { + content: "\e569"; } + +.fa-earth-europe::before { + content: "\f7a2"; } + +.fa-globe-europe::before { + content: "\f7a2"; } + +.fa-cart-flatbed-suitcase::before { + content: "\f59d"; } + +.fa-luggage-cart::before { + content: "\f59d"; } + +.fa-rectangle-xmark::before { + content: "\f410"; } + +.fa-rectangle-times::before { + content: "\f410"; } + +.fa-times-rectangle::before { + content: "\f410"; } + +.fa-window-close::before { + content: "\f410"; } + +.fa-baht-sign::before { + content: "\e0ac"; } + +.fa-book-open::before { + content: "\f518"; } + +.fa-book-journal-whills::before { + content: "\f66a"; } + +.fa-journal-whills::before { + content: "\f66a"; } + +.fa-handcuffs::before { + content: "\e4f8"; } + +.fa-triangle-exclamation::before { + content: "\f071"; } + 
+.fa-exclamation-triangle::before { + content: "\f071"; } + +.fa-warning::before { + content: "\f071"; } + +.fa-database::before { + content: "\f1c0"; } + +.fa-share::before { + content: "\f064"; } + +.fa-mail-forward::before { + content: "\f064"; } + +.fa-bottle-droplet::before { + content: "\e4c4"; } + +.fa-mask-face::before { + content: "\e1d7"; } + +.fa-hill-rockslide::before { + content: "\e508"; } + +.fa-right-left::before { + content: "\f362"; } + +.fa-exchange-alt::before { + content: "\f362"; } + +.fa-paper-plane::before { + content: "\f1d8"; } + +.fa-road-circle-exclamation::before { + content: "\e565"; } + +.fa-dungeon::before { + content: "\f6d9"; } + +.fa-align-right::before { + content: "\f038"; } + +.fa-money-bill-1-wave::before { + content: "\f53b"; } + +.fa-money-bill-wave-alt::before { + content: "\f53b"; } + +.fa-life-ring::before { + content: "\f1cd"; } + +.fa-hands::before { + content: "\f2a7"; } + +.fa-sign-language::before { + content: "\f2a7"; } + +.fa-signing::before { + content: "\f2a7"; } + +.fa-calendar-day::before { + content: "\f783"; } + +.fa-water-ladder::before { + content: "\f5c5"; } + +.fa-ladder-water::before { + content: "\f5c5"; } + +.fa-swimming-pool::before { + content: "\f5c5"; } + +.fa-arrows-up-down::before { + content: "\f07d"; } + +.fa-arrows-v::before { + content: "\f07d"; } + +.fa-face-grimace::before { + content: "\f57f"; } + +.fa-grimace::before { + content: "\f57f"; } + +.fa-wheelchair-move::before { + content: "\e2ce"; } + +.fa-wheelchair-alt::before { + content: "\e2ce"; } + +.fa-turn-down::before { + content: "\f3be"; } + +.fa-level-down-alt::before { + content: "\f3be"; } + +.fa-person-walking-arrow-right::before { + content: "\e552"; } + +.fa-square-envelope::before { + content: "\f199"; } + +.fa-envelope-square::before { + content: "\f199"; } + +.fa-dice::before { + content: "\f522"; } + +.fa-bowling-ball::before { + content: "\f436"; } + +.fa-brain::before { + content: "\f5dc"; } + +.fa-bandage::before { + 
content: "\f462"; } + +.fa-band-aid::before { + content: "\f462"; } + +.fa-calendar-minus::before { + content: "\f272"; } + +.fa-circle-xmark::before { + content: "\f057"; } + +.fa-times-circle::before { + content: "\f057"; } + +.fa-xmark-circle::before { + content: "\f057"; } + +.fa-gifts::before { + content: "\f79c"; } + +.fa-hotel::before { + content: "\f594"; } + +.fa-earth-asia::before { + content: "\f57e"; } + +.fa-globe-asia::before { + content: "\f57e"; } + +.fa-id-card-clip::before { + content: "\f47f"; } + +.fa-id-card-alt::before { + content: "\f47f"; } + +.fa-magnifying-glass-plus::before { + content: "\f00e"; } + +.fa-search-plus::before { + content: "\f00e"; } + +.fa-thumbs-up::before { + content: "\f164"; } + +.fa-user-clock::before { + content: "\f4fd"; } + +.fa-hand-dots::before { + content: "\f461"; } + +.fa-allergies::before { + content: "\f461"; } + +.fa-file-invoice::before { + content: "\f570"; } + +.fa-window-minimize::before { + content: "\f2d1"; } + +.fa-mug-saucer::before { + content: "\f0f4"; } + +.fa-coffee::before { + content: "\f0f4"; } + +.fa-brush::before { + content: "\f55d"; } + +.fa-mask::before { + content: "\f6fa"; } + +.fa-magnifying-glass-minus::before { + content: "\f010"; } + +.fa-search-minus::before { + content: "\f010"; } + +.fa-ruler-vertical::before { + content: "\f548"; } + +.fa-user-large::before { + content: "\f406"; } + +.fa-user-alt::before { + content: "\f406"; } + +.fa-train-tram::before { + content: "\e5b4"; } + +.fa-user-nurse::before { + content: "\f82f"; } + +.fa-syringe::before { + content: "\f48e"; } + +.fa-cloud-sun::before { + content: "\f6c4"; } + +.fa-stopwatch-20::before { + content: "\e06f"; } + +.fa-square-full::before { + content: "\f45c"; } + +.fa-magnet::before { + content: "\f076"; } + +.fa-jar::before { + content: "\e516"; } + +.fa-note-sticky::before { + content: "\f249"; } + +.fa-sticky-note::before { + content: "\f249"; } + +.fa-bug-slash::before { + content: "\e490"; } + 
+.fa-arrow-up-from-water-pump::before { + content: "\e4b6"; } + +.fa-bone::before { + content: "\f5d7"; } + +.fa-user-injured::before { + content: "\f728"; } + +.fa-face-sad-tear::before { + content: "\f5b4"; } + +.fa-sad-tear::before { + content: "\f5b4"; } + +.fa-plane::before { + content: "\f072"; } + +.fa-tent-arrows-down::before { + content: "\e581"; } + +.fa-exclamation::before { + content: "\21"; } + +.fa-arrows-spin::before { + content: "\e4bb"; } + +.fa-print::before { + content: "\f02f"; } + +.fa-turkish-lira-sign::before { + content: "\e2bb"; } + +.fa-try::before { + content: "\e2bb"; } + +.fa-turkish-lira::before { + content: "\e2bb"; } + +.fa-dollar-sign::before { + content: "\24"; } + +.fa-dollar::before { + content: "\24"; } + +.fa-usd::before { + content: "\24"; } + +.fa-x::before { + content: "\58"; } + +.fa-magnifying-glass-dollar::before { + content: "\f688"; } + +.fa-search-dollar::before { + content: "\f688"; } + +.fa-users-gear::before { + content: "\f509"; } + +.fa-users-cog::before { + content: "\f509"; } + +.fa-person-military-pointing::before { + content: "\e54a"; } + +.fa-building-columns::before { + content: "\f19c"; } + +.fa-bank::before { + content: "\f19c"; } + +.fa-institution::before { + content: "\f19c"; } + +.fa-museum::before { + content: "\f19c"; } + +.fa-university::before { + content: "\f19c"; } + +.fa-umbrella::before { + content: "\f0e9"; } + +.fa-trowel::before { + content: "\e589"; } + +.fa-d::before { + content: "\44"; } + +.fa-stapler::before { + content: "\e5af"; } + +.fa-masks-theater::before { + content: "\f630"; } + +.fa-theater-masks::before { + content: "\f630"; } + +.fa-kip-sign::before { + content: "\e1c4"; } + +.fa-hand-point-left::before { + content: "\f0a5"; } + +.fa-handshake-simple::before { + content: "\f4c6"; } + +.fa-handshake-alt::before { + content: "\f4c6"; } + +.fa-jet-fighter::before { + content: "\f0fb"; } + +.fa-fighter-jet::before { + content: "\f0fb"; } + +.fa-square-share-nodes::before { + 
content: "\f1e1"; } + +.fa-share-alt-square::before { + content: "\f1e1"; } + +.fa-barcode::before { + content: "\f02a"; } + +.fa-plus-minus::before { + content: "\e43c"; } + +.fa-video::before { + content: "\f03d"; } + +.fa-video-camera::before { + content: "\f03d"; } + +.fa-graduation-cap::before { + content: "\f19d"; } + +.fa-mortar-board::before { + content: "\f19d"; } + +.fa-hand-holding-medical::before { + content: "\e05c"; } + +.fa-person-circle-check::before { + content: "\e53e"; } + +.fa-turn-up::before { + content: "\f3bf"; } + +.fa-level-up-alt::before { + content: "\f3bf"; } + +.sr-only, +.fa-sr-only { + position: absolute; + width: 1px; + height: 1px; + padding: 0; + margin: -1px; + overflow: hidden; + clip: rect(0, 0, 0, 0); + white-space: nowrap; + border-width: 0; } + +.sr-only-focusable:not(:focus), +.fa-sr-only-focusable:not(:focus) { + position: absolute; + width: 1px; + height: 1px; + padding: 0; + margin: -1px; + overflow: hidden; + clip: rect(0, 0, 0, 0); + white-space: nowrap; + border-width: 0; } + +/*! + * Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com + * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) + * Copyright 2024 Fonticons, Inc. + */ +:root, :host { + --fa-style-family-classic: 'Font Awesome 6 Free'; + --fa-font-solid: normal 900 1em/1 'Font Awesome 6 Free'; } + +@font-face { + font-family: 'Font Awesome 6 Free'; + font-style: normal; + font-weight: 900; + font-display: block; + src: url("../webfonts/fa-solid-900.woff2") format("woff2"), url("../webfonts/fa-solid-900.ttf") format("truetype"); } + +.fas, .td-offline-search-results__close-button:after, +.fa-solid { + font-weight: 900; } + +/*! + * Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com + * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) + * Copyright 2024 Fonticons, Inc. 
+ */ +:root, :host { + --fa-style-family-brands: 'Font Awesome 6 Brands'; + --fa-font-brands: normal 400 1em/1 'Font Awesome 6 Brands'; } + +@font-face { + font-family: 'Font Awesome 6 Brands'; + font-style: normal; + font-weight: 400; + font-display: block; + src: url("../webfonts/fa-brands-400.woff2") format("woff2"), url("../webfonts/fa-brands-400.ttf") format("truetype"); } + +.fab, +.fa-brands { + font-weight: 400; } + +.fa-monero:before { + content: "\f3d0"; } + +.fa-hooli:before { + content: "\f427"; } + +.fa-yelp:before { + content: "\f1e9"; } + +.fa-cc-visa:before { + content: "\f1f0"; } + +.fa-lastfm:before { + content: "\f202"; } + +.fa-shopware:before { + content: "\f5b5"; } + +.fa-creative-commons-nc:before { + content: "\f4e8"; } + +.fa-aws:before { + content: "\f375"; } + +.fa-redhat:before { + content: "\f7bc"; } + +.fa-yoast:before { + content: "\f2b1"; } + +.fa-cloudflare:before { + content: "\e07d"; } + +.fa-ups:before { + content: "\f7e0"; } + +.fa-pixiv:before { + content: "\e640"; } + +.fa-wpexplorer:before { + content: "\f2de"; } + +.fa-dyalog:before { + content: "\f399"; } + +.fa-bity:before { + content: "\f37a"; } + +.fa-stackpath:before { + content: "\f842"; } + +.fa-buysellads:before { + content: "\f20d"; } + +.fa-first-order:before { + content: "\f2b0"; } + +.fa-modx:before { + content: "\f285"; } + +.fa-guilded:before { + content: "\e07e"; } + +.fa-vnv:before { + content: "\f40b"; } + +.fa-square-js:before { + content: "\f3b9"; } + +.fa-js-square:before { + content: "\f3b9"; } + +.fa-microsoft:before { + content: "\f3ca"; } + +.fa-qq:before { + content: "\f1d6"; } + +.fa-orcid:before { + content: "\f8d2"; } + +.fa-java:before { + content: "\f4e4"; } + +.fa-invision:before { + content: "\f7b0"; } + +.fa-creative-commons-pd-alt:before { + content: "\f4ed"; } + +.fa-centercode:before { + content: "\f380"; } + +.fa-glide-g:before { + content: "\f2a6"; } + +.fa-drupal:before { + content: "\f1a9"; } + +.fa-jxl:before { + content: "\e67b"; } + 
+.fa-hire-a-helper:before { + content: "\f3b0"; } + +.fa-creative-commons-by:before { + content: "\f4e7"; } + +.fa-unity:before { + content: "\e049"; } + +.fa-whmcs:before { + content: "\f40d"; } + +.fa-rocketchat:before { + content: "\f3e8"; } + +.fa-vk:before { + content: "\f189"; } + +.fa-untappd:before { + content: "\f405"; } + +.fa-mailchimp:before { + content: "\f59e"; } + +.fa-css3-alt:before { + content: "\f38b"; } + +.fa-square-reddit:before { + content: "\f1a2"; } + +.fa-reddit-square:before { + content: "\f1a2"; } + +.fa-vimeo-v:before { + content: "\f27d"; } + +.fa-contao:before { + content: "\f26d"; } + +.fa-square-font-awesome:before { + content: "\e5ad"; } + +.fa-deskpro:before { + content: "\f38f"; } + +.fa-brave:before { + content: "\e63c"; } + +.fa-sistrix:before { + content: "\f3ee"; } + +.fa-square-instagram:before { + content: "\e055"; } + +.fa-instagram-square:before { + content: "\e055"; } + +.fa-battle-net:before { + content: "\f835"; } + +.fa-the-red-yeti:before { + content: "\f69d"; } + +.fa-square-hacker-news:before { + content: "\f3af"; } + +.fa-hacker-news-square:before { + content: "\f3af"; } + +.fa-edge:before { + content: "\f282"; } + +.fa-threads:before { + content: "\e618"; } + +.fa-napster:before { + content: "\f3d2"; } + +.fa-square-snapchat:before { + content: "\f2ad"; } + +.fa-snapchat-square:before { + content: "\f2ad"; } + +.fa-google-plus-g:before { + content: "\f0d5"; } + +.fa-artstation:before { + content: "\f77a"; } + +.fa-markdown:before { + content: "\f60f"; } + +.fa-sourcetree:before { + content: "\f7d3"; } + +.fa-google-plus:before { + content: "\f2b3"; } + +.fa-diaspora:before { + content: "\f791"; } + +.fa-foursquare:before { + content: "\f180"; } + +.fa-stack-overflow:before { + content: "\f16c"; } + +.fa-github-alt:before { + content: "\f113"; } + +.fa-phoenix-squadron:before { + content: "\f511"; } + +.fa-pagelines:before { + content: "\f18c"; } + +.fa-algolia:before { + content: "\f36c"; } + 
+.fa-red-river:before { + content: "\f3e3"; } + +.fa-creative-commons-sa:before { + content: "\f4ef"; } + +.fa-safari:before { + content: "\f267"; } + +.fa-google:before { + content: "\f1a0"; } + +.fa-square-font-awesome-stroke:before { + content: "\f35c"; } + +.fa-font-awesome-alt:before { + content: "\f35c"; } + +.fa-atlassian:before { + content: "\f77b"; } + +.fa-linkedin-in:before { + content: "\f0e1"; } + +.fa-digital-ocean:before { + content: "\f391"; } + +.fa-nimblr:before { + content: "\f5a8"; } + +.fa-chromecast:before { + content: "\f838"; } + +.fa-evernote:before { + content: "\f839"; } + +.fa-hacker-news:before { + content: "\f1d4"; } + +.fa-creative-commons-sampling:before { + content: "\f4f0"; } + +.fa-adversal:before { + content: "\f36a"; } + +.fa-creative-commons:before { + content: "\f25e"; } + +.fa-watchman-monitoring:before { + content: "\e087"; } + +.fa-fonticons:before { + content: "\f280"; } + +.fa-weixin:before { + content: "\f1d7"; } + +.fa-shirtsinbulk:before { + content: "\f214"; } + +.fa-codepen:before { + content: "\f1cb"; } + +.fa-git-alt:before { + content: "\f841"; } + +.fa-lyft:before { + content: "\f3c3"; } + +.fa-rev:before { + content: "\f5b2"; } + +.fa-windows:before { + content: "\f17a"; } + +.fa-wizards-of-the-coast:before { + content: "\f730"; } + +.fa-square-viadeo:before { + content: "\f2aa"; } + +.fa-viadeo-square:before { + content: "\f2aa"; } + +.fa-meetup:before { + content: "\f2e0"; } + +.fa-centos:before { + content: "\f789"; } + +.fa-adn:before { + content: "\f170"; } + +.fa-cloudsmith:before { + content: "\f384"; } + +.fa-opensuse:before { + content: "\e62b"; } + +.fa-pied-piper-alt:before { + content: "\f1a8"; } + +.fa-square-dribbble:before { + content: "\f397"; } + +.fa-dribbble-square:before { + content: "\f397"; } + +.fa-codiepie:before { + content: "\f284"; } + +.fa-node:before { + content: "\f419"; } + +.fa-mix:before { + content: "\f3cb"; } + +.fa-steam:before { + content: "\f1b6"; } + 
+.fa-cc-apple-pay:before { + content: "\f416"; } + +.fa-scribd:before { + content: "\f28a"; } + +.fa-debian:before { + content: "\e60b"; } + +.fa-openid:before { + content: "\f19b"; } + +.fa-instalod:before { + content: "\e081"; } + +.fa-expeditedssl:before { + content: "\f23e"; } + +.fa-sellcast:before { + content: "\f2da"; } + +.fa-square-twitter:before { + content: "\f081"; } + +.fa-twitter-square:before { + content: "\f081"; } + +.fa-r-project:before { + content: "\f4f7"; } + +.fa-delicious:before { + content: "\f1a5"; } + +.fa-freebsd:before { + content: "\f3a4"; } + +.fa-vuejs:before { + content: "\f41f"; } + +.fa-accusoft:before { + content: "\f369"; } + +.fa-ioxhost:before { + content: "\f208"; } + +.fa-fonticons-fi:before { + content: "\f3a2"; } + +.fa-app-store:before { + content: "\f36f"; } + +.fa-cc-mastercard:before { + content: "\f1f1"; } + +.fa-itunes-note:before { + content: "\f3b5"; } + +.fa-golang:before { + content: "\e40f"; } + +.fa-kickstarter:before { + content: "\f3bb"; } + +.fa-square-kickstarter:before { + content: "\f3bb"; } + +.fa-grav:before { + content: "\f2d6"; } + +.fa-weibo:before { + content: "\f18a"; } + +.fa-uncharted:before { + content: "\e084"; } + +.fa-firstdraft:before { + content: "\f3a1"; } + +.fa-square-youtube:before { + content: "\f431"; } + +.fa-youtube-square:before { + content: "\f431"; } + +.fa-wikipedia-w:before { + content: "\f266"; } + +.fa-wpressr:before { + content: "\f3e4"; } + +.fa-rendact:before { + content: "\f3e4"; } + +.fa-angellist:before { + content: "\f209"; } + +.fa-galactic-republic:before { + content: "\f50c"; } + +.fa-nfc-directional:before { + content: "\e530"; } + +.fa-skype:before { + content: "\f17e"; } + +.fa-joget:before { + content: "\f3b7"; } + +.fa-fedora:before { + content: "\f798"; } + +.fa-stripe-s:before { + content: "\f42a"; } + +.fa-meta:before { + content: "\e49b"; } + +.fa-laravel:before { + content: "\f3bd"; } + +.fa-hotjar:before { + content: "\f3b1"; } + +.fa-bluetooth-b:before { 
+ content: "\f294"; } + +.fa-square-letterboxd:before { + content: "\e62e"; } + +.fa-sticker-mule:before { + content: "\f3f7"; } + +.fa-creative-commons-zero:before { + content: "\f4f3"; } + +.fa-hips:before { + content: "\f452"; } + +.fa-behance:before { + content: "\f1b4"; } + +.fa-reddit:before { + content: "\f1a1"; } + +.fa-discord:before { + content: "\f392"; } + +.fa-chrome:before { + content: "\f268"; } + +.fa-app-store-ios:before { + content: "\f370"; } + +.fa-cc-discover:before { + content: "\f1f2"; } + +.fa-wpbeginner:before { + content: "\f297"; } + +.fa-confluence:before { + content: "\f78d"; } + +.fa-shoelace:before { + content: "\e60c"; } + +.fa-mdb:before { + content: "\f8ca"; } + +.fa-dochub:before { + content: "\f394"; } + +.fa-accessible-icon:before { + content: "\f368"; } + +.fa-ebay:before { + content: "\f4f4"; } + +.fa-amazon:before { + content: "\f270"; } + +.fa-unsplash:before { + content: "\e07c"; } + +.fa-yarn:before { + content: "\f7e3"; } + +.fa-square-steam:before { + content: "\f1b7"; } + +.fa-steam-square:before { + content: "\f1b7"; } + +.fa-500px:before { + content: "\f26e"; } + +.fa-square-vimeo:before { + content: "\f194"; } + +.fa-vimeo-square:before { + content: "\f194"; } + +.fa-asymmetrik:before { + content: "\f372"; } + +.fa-font-awesome:before { + content: "\f2b4"; } + +.fa-font-awesome-flag:before { + content: "\f2b4"; } + +.fa-font-awesome-logo-full:before { + content: "\f2b4"; } + +.fa-gratipay:before { + content: "\f184"; } + +.fa-apple:before { + content: "\f179"; } + +.fa-hive:before { + content: "\e07f"; } + +.fa-gitkraken:before { + content: "\f3a6"; } + +.fa-keybase:before { + content: "\f4f5"; } + +.fa-apple-pay:before { + content: "\f415"; } + +.fa-padlet:before { + content: "\e4a0"; } + +.fa-amazon-pay:before { + content: "\f42c"; } + +.fa-square-github:before { + content: "\f092"; } + +.fa-github-square:before { + content: "\f092"; } + +.fa-stumbleupon:before { + content: "\f1a4"; } + +.fa-fedex:before { + 
content: "\f797"; } + +.fa-phoenix-framework:before { + content: "\f3dc"; } + +.fa-shopify:before { + content: "\e057"; } + +.fa-neos:before { + content: "\f612"; } + +.fa-square-threads:before { + content: "\e619"; } + +.fa-hackerrank:before { + content: "\f5f7"; } + +.fa-researchgate:before { + content: "\f4f8"; } + +.fa-swift:before { + content: "\f8e1"; } + +.fa-angular:before { + content: "\f420"; } + +.fa-speakap:before { + content: "\f3f3"; } + +.fa-angrycreative:before { + content: "\f36e"; } + +.fa-y-combinator:before { + content: "\f23b"; } + +.fa-empire:before { + content: "\f1d1"; } + +.fa-envira:before { + content: "\f299"; } + +.fa-google-scholar:before { + content: "\e63b"; } + +.fa-square-gitlab:before { + content: "\e5ae"; } + +.fa-gitlab-square:before { + content: "\e5ae"; } + +.fa-studiovinari:before { + content: "\f3f8"; } + +.fa-pied-piper:before { + content: "\f2ae"; } + +.fa-wordpress:before { + content: "\f19a"; } + +.fa-product-hunt:before { + content: "\f288"; } + +.fa-firefox:before { + content: "\f269"; } + +.fa-linode:before { + content: "\f2b8"; } + +.fa-goodreads:before { + content: "\f3a8"; } + +.fa-square-odnoklassniki:before { + content: "\f264"; } + +.fa-odnoklassniki-square:before { + content: "\f264"; } + +.fa-jsfiddle:before { + content: "\f1cc"; } + +.fa-sith:before { + content: "\f512"; } + +.fa-themeisle:before { + content: "\f2b2"; } + +.fa-page4:before { + content: "\f3d7"; } + +.fa-hashnode:before { + content: "\e499"; } + +.fa-react:before { + content: "\f41b"; } + +.fa-cc-paypal:before { + content: "\f1f4"; } + +.fa-squarespace:before { + content: "\f5be"; } + +.fa-cc-stripe:before { + content: "\f1f5"; } + +.fa-creative-commons-share:before { + content: "\f4f2"; } + +.fa-bitcoin:before { + content: "\f379"; } + +.fa-keycdn:before { + content: "\f3ba"; } + +.fa-opera:before { + content: "\f26a"; } + +.fa-itch-io:before { + content: "\f83a"; } + +.fa-umbraco:before { + content: "\f8e8"; } + +.fa-galactic-senate:before { 
+ content: "\f50d"; } + +.fa-ubuntu:before { + content: "\f7df"; } + +.fa-draft2digital:before { + content: "\f396"; } + +.fa-stripe:before { + content: "\f429"; } + +.fa-houzz:before { + content: "\f27c"; } + +.fa-gg:before { + content: "\f260"; } + +.fa-dhl:before { + content: "\f790"; } + +.fa-square-pinterest:before { + content: "\f0d3"; } + +.fa-pinterest-square:before { + content: "\f0d3"; } + +.fa-xing:before { + content: "\f168"; } + +.fa-blackberry:before { + content: "\f37b"; } + +.fa-creative-commons-pd:before { + content: "\f4ec"; } + +.fa-playstation:before { + content: "\f3df"; } + +.fa-quinscape:before { + content: "\f459"; } + +.fa-less:before { + content: "\f41d"; } + +.fa-blogger-b:before { + content: "\f37d"; } + +.fa-opencart:before { + content: "\f23d"; } + +.fa-vine:before { + content: "\f1ca"; } + +.fa-signal-messenger:before { + content: "\e663"; } + +.fa-paypal:before { + content: "\f1ed"; } + +.fa-gitlab:before { + content: "\f296"; } + +.fa-typo3:before { + content: "\f42b"; } + +.fa-reddit-alien:before { + content: "\f281"; } + +.fa-yahoo:before { + content: "\f19e"; } + +.fa-dailymotion:before { + content: "\e052"; } + +.fa-affiliatetheme:before { + content: "\f36b"; } + +.fa-pied-piper-pp:before { + content: "\f1a7"; } + +.fa-bootstrap:before { + content: "\f836"; } + +.fa-odnoklassniki:before { + content: "\f263"; } + +.fa-nfc-symbol:before { + content: "\e531"; } + +.fa-mintbit:before { + content: "\e62f"; } + +.fa-ethereum:before { + content: "\f42e"; } + +.fa-speaker-deck:before { + content: "\f83c"; } + +.fa-creative-commons-nc-eu:before { + content: "\f4e9"; } + +.fa-patreon:before { + content: "\f3d9"; } + +.fa-avianex:before { + content: "\f374"; } + +.fa-ello:before { + content: "\f5f1"; } + +.fa-gofore:before { + content: "\f3a7"; } + +.fa-bimobject:before { + content: "\f378"; } + +.fa-brave-reverse:before { + content: "\e63d"; } + +.fa-facebook-f:before { + content: "\f39e"; } + +.fa-square-google-plus:before { + content: 
"\f0d4"; } + +.fa-google-plus-square:before { + content: "\f0d4"; } + +.fa-web-awesome:before { + content: "\e682"; } + +.fa-mandalorian:before { + content: "\f50f"; } + +.fa-first-order-alt:before { + content: "\f50a"; } + +.fa-osi:before { + content: "\f41a"; } + +.fa-google-wallet:before { + content: "\f1ee"; } + +.fa-d-and-d-beyond:before { + content: "\f6ca"; } + +.fa-periscope:before { + content: "\f3da"; } + +.fa-fulcrum:before { + content: "\f50b"; } + +.fa-cloudscale:before { + content: "\f383"; } + +.fa-forumbee:before { + content: "\f211"; } + +.fa-mizuni:before { + content: "\f3cc"; } + +.fa-schlix:before { + content: "\f3ea"; } + +.fa-square-xing:before { + content: "\f169"; } + +.fa-xing-square:before { + content: "\f169"; } + +.fa-bandcamp:before { + content: "\f2d5"; } + +.fa-wpforms:before { + content: "\f298"; } + +.fa-cloudversify:before { + content: "\f385"; } + +.fa-usps:before { + content: "\f7e1"; } + +.fa-megaport:before { + content: "\f5a3"; } + +.fa-magento:before { + content: "\f3c4"; } + +.fa-spotify:before { + content: "\f1bc"; } + +.fa-optin-monster:before { + content: "\f23c"; } + +.fa-fly:before { + content: "\f417"; } + +.fa-aviato:before { + content: "\f421"; } + +.fa-itunes:before { + content: "\f3b4"; } + +.fa-cuttlefish:before { + content: "\f38c"; } + +.fa-blogger:before { + content: "\f37c"; } + +.fa-flickr:before { + content: "\f16e"; } + +.fa-viber:before { + content: "\f409"; } + +.fa-soundcloud:before { + content: "\f1be"; } + +.fa-digg:before { + content: "\f1a6"; } + +.fa-tencent-weibo:before { + content: "\f1d5"; } + +.fa-letterboxd:before { + content: "\e62d"; } + +.fa-symfony:before { + content: "\f83d"; } + +.fa-maxcdn:before { + content: "\f136"; } + +.fa-etsy:before { + content: "\f2d7"; } + +.fa-facebook-messenger:before { + content: "\f39f"; } + +.fa-audible:before { + content: "\f373"; } + +.fa-think-peaks:before { + content: "\f731"; } + +.fa-bilibili:before { + content: "\e3d9"; } + +.fa-erlang:before { + 
content: "\f39d"; } + +.fa-x-twitter:before { + content: "\e61b"; } + +.fa-cotton-bureau:before { + content: "\f89e"; } + +.fa-dashcube:before { + content: "\f210"; } + +.fa-42-group:before { + content: "\e080"; } + +.fa-innosoft:before { + content: "\e080"; } + +.fa-stack-exchange:before { + content: "\f18d"; } + +.fa-elementor:before { + content: "\f430"; } + +.fa-square-pied-piper:before { + content: "\e01e"; } + +.fa-pied-piper-square:before { + content: "\e01e"; } + +.fa-creative-commons-nd:before { + content: "\f4eb"; } + +.fa-palfed:before { + content: "\f3d8"; } + +.fa-superpowers:before { + content: "\f2dd"; } + +.fa-resolving:before { + content: "\f3e7"; } + +.fa-xbox:before { + content: "\f412"; } + +.fa-square-web-awesome-stroke:before { + content: "\e684"; } + +.fa-searchengin:before { + content: "\f3eb"; } + +.fa-tiktok:before { + content: "\e07b"; } + +.fa-square-facebook:before { + content: "\f082"; } + +.fa-facebook-square:before { + content: "\f082"; } + +.fa-renren:before { + content: "\f18b"; } + +.fa-linux:before { + content: "\f17c"; } + +.fa-glide:before { + content: "\f2a5"; } + +.fa-linkedin:before { + content: "\f08c"; } + +.fa-hubspot:before { + content: "\f3b2"; } + +.fa-deploydog:before { + content: "\f38e"; } + +.fa-twitch:before { + content: "\f1e8"; } + +.fa-ravelry:before { + content: "\f2d9"; } + +.fa-mixer:before { + content: "\e056"; } + +.fa-square-lastfm:before { + content: "\f203"; } + +.fa-lastfm-square:before { + content: "\f203"; } + +.fa-vimeo:before { + content: "\f40a"; } + +.fa-mendeley:before { + content: "\f7b3"; } + +.fa-uniregistry:before { + content: "\f404"; } + +.fa-figma:before { + content: "\f799"; } + +.fa-creative-commons-remix:before { + content: "\f4ee"; } + +.fa-cc-amazon-pay:before { + content: "\f42d"; } + +.fa-dropbox:before { + content: "\f16b"; } + +.fa-instagram:before { + content: "\f16d"; } + +.fa-cmplid:before { + content: "\e360"; } + +.fa-upwork:before { + content: "\e641"; } + 
+.fa-facebook:before { + content: "\f09a"; } + +.fa-gripfire:before { + content: "\f3ac"; } + +.fa-jedi-order:before { + content: "\f50e"; } + +.fa-uikit:before { + content: "\f403"; } + +.fa-fort-awesome-alt:before { + content: "\f3a3"; } + +.fa-phabricator:before { + content: "\f3db"; } + +.fa-ussunnah:before { + content: "\f407"; } + +.fa-earlybirds:before { + content: "\f39a"; } + +.fa-trade-federation:before { + content: "\f513"; } + +.fa-autoprefixer:before { + content: "\f41c"; } + +.fa-whatsapp:before { + content: "\f232"; } + +.fa-square-upwork:before { + content: "\e67c"; } + +.fa-slideshare:before { + content: "\f1e7"; } + +.fa-google-play:before { + content: "\f3ab"; } + +.fa-viadeo:before { + content: "\f2a9"; } + +.fa-line:before { + content: "\f3c0"; } + +.fa-google-drive:before { + content: "\f3aa"; } + +.fa-servicestack:before { + content: "\f3ec"; } + +.fa-simplybuilt:before { + content: "\f215"; } + +.fa-bitbucket:before { + content: "\f171"; } + +.fa-imdb:before { + content: "\f2d8"; } + +.fa-deezer:before { + content: "\e077"; } + +.fa-raspberry-pi:before { + content: "\f7bb"; } + +.fa-jira:before { + content: "\f7b1"; } + +.fa-docker:before { + content: "\f395"; } + +.fa-screenpal:before { + content: "\e570"; } + +.fa-bluetooth:before { + content: "\f293"; } + +.fa-gitter:before { + content: "\f426"; } + +.fa-d-and-d:before { + content: "\f38d"; } + +.fa-microblog:before { + content: "\e01a"; } + +.fa-cc-diners-club:before { + content: "\f24c"; } + +.fa-gg-circle:before { + content: "\f261"; } + +.fa-pied-piper-hat:before { + content: "\f4e5"; } + +.fa-kickstarter-k:before { + content: "\f3bc"; } + +.fa-yandex:before { + content: "\f413"; } + +.fa-readme:before { + content: "\f4d5"; } + +.fa-html5:before { + content: "\f13b"; } + +.fa-sellsy:before { + content: "\f213"; } + +.fa-square-web-awesome:before { + content: "\e683"; } + +.fa-sass:before { + content: "\f41e"; } + +.fa-wirsindhandwerk:before { + content: "\e2d0"; } + +.fa-wsh:before { 
+ content: "\e2d0"; } + +.fa-buromobelexperte:before { + content: "\f37f"; } + +.fa-salesforce:before { + content: "\f83b"; } + +.fa-octopus-deploy:before { + content: "\e082"; } + +.fa-medapps:before { + content: "\f3c6"; } + +.fa-ns8:before { + content: "\f3d5"; } + +.fa-pinterest-p:before { + content: "\f231"; } + +.fa-apper:before { + content: "\f371"; } + +.fa-fort-awesome:before { + content: "\f286"; } + +.fa-waze:before { + content: "\f83f"; } + +.fa-bluesky:before { + content: "\e671"; } + +.fa-cc-jcb:before { + content: "\f24b"; } + +.fa-snapchat:before { + content: "\f2ab"; } + +.fa-snapchat-ghost:before { + content: "\f2ab"; } + +.fa-fantasy-flight-games:before { + content: "\f6dc"; } + +.fa-rust:before { + content: "\e07a"; } + +.fa-wix:before { + content: "\f5cf"; } + +.fa-square-behance:before { + content: "\f1b5"; } + +.fa-behance-square:before { + content: "\f1b5"; } + +.fa-supple:before { + content: "\f3f9"; } + +.fa-webflow:before { + content: "\e65c"; } + +.fa-rebel:before { + content: "\f1d0"; } + +.fa-css3:before { + content: "\f13c"; } + +.fa-staylinked:before { + content: "\f3f5"; } + +.fa-kaggle:before { + content: "\f5fa"; } + +.fa-space-awesome:before { + content: "\e5ac"; } + +.fa-deviantart:before { + content: "\f1bd"; } + +.fa-cpanel:before { + content: "\f388"; } + +.fa-goodreads-g:before { + content: "\f3a9"; } + +.fa-square-git:before { + content: "\f1d2"; } + +.fa-git-square:before { + content: "\f1d2"; } + +.fa-square-tumblr:before { + content: "\f174"; } + +.fa-tumblr-square:before { + content: "\f174"; } + +.fa-trello:before { + content: "\f181"; } + +.fa-creative-commons-nc-jp:before { + content: "\f4ea"; } + +.fa-get-pocket:before { + content: "\f265"; } + +.fa-perbyte:before { + content: "\e083"; } + +.fa-grunt:before { + content: "\f3ad"; } + +.fa-weebly:before { + content: "\f5cc"; } + +.fa-connectdevelop:before { + content: "\f20e"; } + +.fa-leanpub:before { + content: "\f212"; } + +.fa-black-tie:before { + content: 
"\f27e"; } + +.fa-themeco:before { + content: "\f5c6"; } + +.fa-python:before { + content: "\f3e2"; } + +.fa-android:before { + content: "\f17b"; } + +.fa-bots:before { + content: "\e340"; } + +.fa-free-code-camp:before { + content: "\f2c5"; } + +.fa-hornbill:before { + content: "\f592"; } + +.fa-js:before { + content: "\f3b8"; } + +.fa-ideal:before { + content: "\e013"; } + +.fa-git:before { + content: "\f1d3"; } + +.fa-dev:before { + content: "\f6cc"; } + +.fa-sketch:before { + content: "\f7c6"; } + +.fa-yandex-international:before { + content: "\f414"; } + +.fa-cc-amex:before { + content: "\f1f3"; } + +.fa-uber:before { + content: "\f402"; } + +.fa-github:before { + content: "\f09b"; } + +.fa-php:before { + content: "\f457"; } + +.fa-alipay:before { + content: "\f642"; } + +.fa-youtube:before { + content: "\f167"; } + +.fa-skyatlas:before { + content: "\f216"; } + +.fa-firefox-browser:before { + content: "\e007"; } + +.fa-replyd:before { + content: "\f3e6"; } + +.fa-suse:before { + content: "\f7d6"; } + +.fa-jenkins:before { + content: "\f3b6"; } + +.fa-twitter:before { + content: "\f099"; } + +.fa-rockrms:before { + content: "\f3e9"; } + +.fa-pinterest:before { + content: "\f0d2"; } + +.fa-buffer:before { + content: "\f837"; } + +.fa-npm:before { + content: "\f3d4"; } + +.fa-yammer:before { + content: "\f840"; } + +.fa-btc:before { + content: "\f15a"; } + +.fa-dribbble:before { + content: "\f17d"; } + +.fa-stumbleupon-circle:before { + content: "\f1a3"; } + +.fa-internet-explorer:before { + content: "\f26b"; } + +.fa-stubber:before { + content: "\e5c7"; } + +.fa-telegram:before { + content: "\f2c6"; } + +.fa-telegram-plane:before { + content: "\f2c6"; } + +.fa-old-republic:before { + content: "\f510"; } + +.fa-odysee:before { + content: "\e5c6"; } + +.fa-square-whatsapp:before { + content: "\f40c"; } + +.fa-whatsapp-square:before { + content: "\f40c"; } + +.fa-node-js:before { + content: "\f3d3"; } + +.fa-edge-legacy:before { + content: "\e078"; } + 
+.fa-slack:before { + content: "\f198"; } + +.fa-slack-hash:before { + content: "\f198"; } + +.fa-medrt:before { + content: "\f3c8"; } + +.fa-usb:before { + content: "\f287"; } + +.fa-tumblr:before { + content: "\f173"; } + +.fa-vaadin:before { + content: "\f408"; } + +.fa-quora:before { + content: "\f2c4"; } + +.fa-square-x-twitter:before { + content: "\e61a"; } + +.fa-reacteurope:before { + content: "\f75d"; } + +.fa-medium:before { + content: "\f23a"; } + +.fa-medium-m:before { + content: "\f23a"; } + +.fa-amilia:before { + content: "\f36d"; } + +.fa-mixcloud:before { + content: "\f289"; } + +.fa-flipboard:before { + content: "\f44d"; } + +.fa-viacoin:before { + content: "\f237"; } + +.fa-critical-role:before { + content: "\f6c9"; } + +.fa-sitrox:before { + content: "\e44a"; } + +.fa-discourse:before { + content: "\f393"; } + +.fa-joomla:before { + content: "\f1aa"; } + +.fa-mastodon:before { + content: "\f4f6"; } + +.fa-airbnb:before { + content: "\f834"; } + +.fa-wolf-pack-battalion:before { + content: "\f514"; } + +.fa-buy-n-large:before { + content: "\f8a6"; } + +.fa-gulp:before { + content: "\f3ae"; } + +.fa-creative-commons-sampling-plus:before { + content: "\f4f1"; } + +.fa-strava:before { + content: "\f428"; } + +.fa-ember:before { + content: "\f423"; } + +.fa-canadian-maple-leaf:before { + content: "\f785"; } + +.fa-teamspeak:before { + content: "\f4f9"; } + +.fa-pushed:before { + content: "\f3e1"; } + +.fa-wordpress-simple:before { + content: "\f411"; } + +.fa-nutritionix:before { + content: "\f3d6"; } + +.fa-wodu:before { + content: "\e088"; } + +.fa-google-pay:before { + content: "\e079"; } + +.fa-intercom:before { + content: "\f7af"; } + +.fa-zhihu:before { + content: "\f63f"; } + +.fa-korvue:before { + content: "\f42f"; } + +.fa-pix:before { + content: "\e43a"; } + +.fa-steam-symbol:before { + content: "\f3f6"; } + +/* +Projects can override this file. 
For details, see: +https://www.docsy.dev/docs/adding-content/lookandfeel/#project-style-files +*/ +.td-border-top { + border: none; + border-top: 1px solid #eee; } + +.td-border-none { + border: none; } + +.td-block-padding, .td-default main section { + padding-top: 4rem; + padding-bottom: 4rem; } + @media (min-width: 768px) { + .td-block-padding, .td-default main section { + padding-top: 5rem; + padding-bottom: 5rem; } } +.td-overlay { + position: relative; } + .td-overlay::after { + content: ""; + position: absolute; + top: 0; + right: 0; + bottom: 0; + left: 0; } + .td-overlay--dark::after { + background-color: rgba(64, 63, 76, 0.3); } + .td-overlay--light::after { + background-color: rgba(211, 243, 238, 0.3); } + .td-overlay__inner { + position: relative; + z-index: 1; } + +@media (min-width: 992px) { + .td-max-width-on-larger-screens, .td-card.card, .td-card-group.card-group, .td-content > .tab-content .tab-pane { + max-width: 80%; } } + +.-bg-blue { + color: #fff; + background-color: #0d6efd; } + +.-bg-blue p:not(.p-initial) > a { + color: #81b3fe; } + .-bg-blue p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-blue { + color: #0d6efd; } + +.-bg-indigo { + color: #fff; + background-color: #6610f2; } + +.-bg-indigo p:not(.p-initial) > a { + color: #85b6fe; } + .-bg-indigo p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-indigo { + color: #6610f2; } + +.-bg-purple { + color: #fff; + background-color: #6f42c1; } + +.-bg-purple p:not(.p-initial) > a { + color: #84b5fe; } + .-bg-purple p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-purple { + color: #6f42c1; } + +.-bg-pink { + color: #fff; + background-color: #d63384; } + +.-bg-pink p:not(.p-initial) > a { + color: #81b4fe; } + .-bg-pink p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-pink { + color: #d63384; } + +.-bg-red { + color: #fff; + background-color: #dc3545; } + +.-bg-red p:not(.p-initial) > a { + color: #7db1fe; } + .-bg-red p:not(.p-initial) > a:hover { + 
color: #094db1; } + +.-text-red { + color: #dc3545; } + +.-bg-orange { + color: #000; + background-color: #fd7e14; } + +.-bg-orange p:not(.p-initial) > a { + color: #073b87; } + .-bg-orange p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-orange { + color: #fd7e14; } + +.-bg-yellow { + color: #000; + background-color: #ffc107; } + +.-bg-yellow p:not(.p-initial) > a { + color: #073982; } + .-bg-yellow p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-yellow { + color: #ffc107; } + +.-bg-green { + color: #fff; + background-color: #198754; } + +.-bg-green p:not(.p-initial) > a { + color: #b3d2fe; } + .-bg-green p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-green { + color: #198754; } + +.-bg-teal { + color: #000; + background-color: #20c997; } + +.-bg-teal p:not(.p-initial) > a { + color: #063274; } + .-bg-teal p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-teal { + color: #20c997; } + +.-bg-cyan { + color: #000; + background-color: #0dcaf0; } + +.-bg-cyan p:not(.p-initial) > a { + color: #06377e; } + .-bg-cyan p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-cyan { + color: #0dcaf0; } + +.-bg-black { + color: #fff; + background-color: #000; } + +.-bg-black p:not(.p-initial) > a { + color: white; } + .-bg-black p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-black { + color: #000; } + +.-bg-white { + color: #000; + background-color: #fff; } + +.-bg-white p:not(.p-initial) > a { + color: #0d6efd; } + .-bg-white p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-white { + color: #fff; } + +.-bg-gray { + color: #fff; + background-color: #6c757d; } + +.-bg-gray p:not(.p-initial) > a { + color: #90bdfe; } + .-bg-gray p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-gray { + color: #6c757d; } + +.-bg-gray-dark { + color: #fff; + background-color: #343a40; } + +.-bg-gray-dark p:not(.p-initial) > a { + color: #c8deff; } + .-bg-gray-dark p:not(.p-initial) > a:hover { + color: #094db1; } 
+ +.-text-gray-dark { + color: #343a40; } + +.-bg-primary { + color: #000; + background-color: #189DD0; } + +.-bg-primary p:not(.p-initial) > a { + color: #063273; } + .-bg-primary p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-primary { + color: #189DD0; } + +.-bg-secondary { + color: #000; + background-color: #ffcc00; } + +.-bg-secondary p:not(.p-initial) > a { + color: #07377f; } + .-bg-secondary p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-secondary { + color: #ffcc00; } + +.-bg-success { + color: #000; + background-color: #5ca012; } + +.-bg-success p:not(.p-initial) > a { + color: #052658; } + .-bg-success p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-success { + color: #5ca012; } + +.-bg-info { + color: #fff; + background-color: #667373; } + +.-bg-info p:not(.p-initial) > a { + color: #98c1fe; } + .-bg-info p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-info { + color: #667373; } + +.-bg-warning { + color: #000; + background-color: #ed6a5a; } + +.-bg-warning p:not(.p-initial) > a { + color: #0847a2; } + .-bg-warning p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-warning { + color: #ed6a5a; } + +.-bg-danger { + color: #000; + background-color: #fe4954; } + +.-bg-danger p:not(.p-initial) > a { + color: #0847a2; } + .-bg-danger p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-danger { + color: #fe4954; } + +.-bg-light { + color: #000; + background-color: #d3f3ee; } + +.-bg-light p:not(.p-initial) > a { + color: #0c62e1; } + .-bg-light p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-light { + color: #d3f3ee; } + +.-bg-dark { + color: #fff; + background-color: #403f4c; } + +.-bg-dark p:not(.p-initial) > a { + color: #bdd7fe; } + .-bg-dark p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-dark { + color: #403f4c; } + +.-bg-100 { + color: #000; + background-color: #f8f9fa; } + +.-bg-100 p:not(.p-initial) > a { + color: #0d6bf7; } + .-bg-100 p:not(.p-initial) > 
a:hover { + color: #094db1; } + +.-text-100 { + color: #f8f9fa; } + +.-bg-200 { + color: #000; + background-color: #e9ecef; } + +.-bg-200 p:not(.p-initial) > a { + color: #0c66ea; } + .-bg-200 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-200 { + color: #e9ecef; } + +.-bg-300 { + color: #000; + background-color: #dee2e6; } + +.-bg-300 p:not(.p-initial) > a { + color: #0c61e0; } + .-bg-300 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-300 { + color: #dee2e6; } + +.-bg-400 { + color: #000; + background-color: #ced4da; } + +.-bg-400 p:not(.p-initial) > a { + color: #0b5bd2; } + .-bg-400 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-400 { + color: #ced4da; } + +.-bg-500 { + color: #000; + background-color: #adb5bd; } + +.-bg-500 p:not(.p-initial) > a { + color: #094eb4; } + .-bg-500 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-500 { + color: #adb5bd; } + +.-bg-600 { + color: #fff; + background-color: #6c757d; } + +.-bg-600 p:not(.p-initial) > a { + color: #90bdfe; } + .-bg-600 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-600 { + color: #6c757d; } + +.-bg-700 { + color: #fff; + background-color: #495057; } + +.-bg-700 p:not(.p-initial) > a { + color: #b3d2fe; } + .-bg-700 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-700 { + color: #495057; } + +.-bg-800 { + color: #fff; + background-color: #343a40; } + +.-bg-800 p:not(.p-initial) > a { + color: #c8deff; } + .-bg-800 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-800 { + color: #343a40; } + +.-bg-900 { + color: #fff; + background-color: #212529; } + +.-bg-900 p:not(.p-initial) > a { + color: #dceaff; } + .-bg-900 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-900 { + color: #212529; } + +.-bg-0 { + color: #fff; + background-color: #403f4c; } + +.-bg-0 p:not(.p-initial) > a { + color: #bdd7fe; } + .-bg-0 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-0 { + color: #403f4c; } + +.-bg-1 { + color: 
#000; + background-color: #189DD0; } + +.-bg-1 p:not(.p-initial) > a { + color: #063273; } + .-bg-1 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-1 { + color: #189DD0; } + +.-bg-2 { + color: #000; + background-color: #ffcc00; } + +.-bg-2 p:not(.p-initial) > a { + color: #07377f; } + .-bg-2 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-2 { + color: #ffcc00; } + +.-bg-3 { + color: #fff; + background-color: #667373; } + +.-bg-3 p:not(.p-initial) > a { + color: #98c1fe; } + .-bg-3 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-3 { + color: #667373; } + +.-bg-4 { + color: #000; + background-color: #fff; } + +.-bg-4 p:not(.p-initial) > a { + color: #0d6efd; } + .-bg-4 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-4 { + color: #fff; } + +.-bg-5 { + color: #fff; + background-color: #6c757d; } + +.-bg-5 p:not(.p-initial) > a { + color: #90bdfe; } + .-bg-5 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-5 { + color: #6c757d; } + +.-bg-6 { + color: #000; + background-color: #5ca012; } + +.-bg-6 p:not(.p-initial) > a { + color: #052658; } + .-bg-6 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-6 { + color: #5ca012; } + +.-bg-7 { + color: #000; + background-color: #ed6a5a; } + +.-bg-7 p:not(.p-initial) > a { + color: #0847a2; } + .-bg-7 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-7 { + color: #ed6a5a; } + +.-bg-8 { + color: #fff; + background-color: #403f4c; } + +.-bg-8 p:not(.p-initial) > a { + color: #bdd7fe; } + .-bg-8 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-8 { + color: #403f4c; } + +.-bg-9 { + color: #000; + background-color: #fe4954; } + +.-bg-9 p:not(.p-initial) > a { + color: #0847a2; } + .-bg-9 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-9 { + color: #fe4954; } + +.-bg-10 { + color: #000; + background-color: #189DD0; } + +.-bg-10 p:not(.p-initial) > a { + color: #063273; } + .-bg-10 p:not(.p-initial) > a:hover { + color: #094db1; } + 
+.-text-10 { + color: #189DD0; } + +.-bg-11 { + color: #000; + background-color: #ffcc00; } + +.-bg-11 p:not(.p-initial) > a { + color: #07377f; } + .-bg-11 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-11 { + color: #ffcc00; } + +.-bg-12 { + color: #fff; + background-color: #667373; } + +.-bg-12 p:not(.p-initial) > a { + color: #98c1fe; } + .-bg-12 p:not(.p-initial) > a:hover { + color: #094db1; } + +.-text-12 { + color: #667373; } + +.td-table:not(.td-initial), .td-box table:not(.td-initial) { + display: block; } + +.td-box--height-min { + min-height: 300px; } + +.td-box--height-med { + min-height: 400px; } + +.td-box--height-max { + min-height: 500px; } + +.td-box--height-full { + min-height: 100vh; } + +@media (min-width: 768px) { + .td-box--height-min { + min-height: 450px; } + .td-box--height-med { + min-height: 500px; } + .td-box--height-max { + min-height: 650px; } } + +.td-box .row { + padding-left: 5vw; + padding-right: 5vw; } + +.td-box.linkbox { + padding: 5vh 5vw; } + +.td-box--0 { + color: #fff; + background-color: #403f4c; } + .td-box--0 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #403f4c transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--0 p > a, .td-box--0 span > a { + color: #bdd7fe; } + .td-box--0 p > a:hover, .td-box--0 span > a:hover { + color: #d1e3fe; } + +.td-box--1 { + color: #000; + background-color: #189DD0; } + .td-box--1 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #189DD0 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--1 p > a, .td-box--1 span > a { + color: #063273; } + .td-box--1 p > a:hover, .td-box--1 span > a:hover { + color: #042351; } + +.td-box--2 { + color: #000; + background-color: #ffcc00; } + 
.td-box--2 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #ffcc00 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--2 p > a, .td-box--2 span > a { + color: #07377f; } + .td-box--2 p > a:hover, .td-box--2 span > a:hover { + color: #052759; } + +.td-box--3 { + color: #fff; + background-color: #667373; } + .td-box--3 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #667373 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--3 p > a, .td-box--3 span > a { + color: #98c1fe; } + .td-box--3 p > a:hover, .td-box--3 span > a:hover { + color: #b7d4fe; } + +.td-box--4 { + color: #000; + background-color: #fff; } + .td-box--4 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #fff transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--4 p > a, .td-box--4 span > a { + color: #0d6efd; } + .td-box--4 p > a:hover, .td-box--4 span > a:hover { + color: #094db1; } + +.td-box--5 { + color: #fff; + background-color: #6c757d; } + .td-box--5 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #6c757d transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--5 p > a, .td-box--5 span > a { + color: #90bdfe; } + .td-box--5 p > a:hover, .td-box--5 span > a:hover { + color: #b1d1fe; } + +.td-box--6 { + color: #000; + background-color: #5ca012; } + .td-box--6 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: 
#5ca012 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--6 p > a, .td-box--6 span > a { + color: #052658; } + .td-box--6 p > a:hover, .td-box--6 span > a:hover { + color: #041b3e; } + +.td-box--7 { + color: #000; + background-color: #ed6a5a; } + .td-box--7 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #ed6a5a transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--7 p > a, .td-box--7 span > a { + color: #0847a2; } + .td-box--7 p > a:hover, .td-box--7 span > a:hover { + color: #063271; } + +.td-box--8 { + color: #fff; + background-color: #403f4c; } + .td-box--8 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #403f4c transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--8 p > a, .td-box--8 span > a { + color: #bdd7fe; } + .td-box--8 p > a:hover, .td-box--8 span > a:hover { + color: #d1e3fe; } + +.td-box--9 { + color: #000; + background-color: #fe4954; } + .td-box--9 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #fe4954 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--9 p > a, .td-box--9 span > a { + color: #0847a2; } + .td-box--9 p > a:hover, .td-box--9 span > a:hover { + color: #063271; } + +.td-box--10 { + color: #000; + background-color: #189DD0; } + .td-box--10 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #189DD0 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--10 p > a, .td-box--10 span > a { + color: 
#063273; } + .td-box--10 p > a:hover, .td-box--10 span > a:hover { + color: #042351; } + +.td-box--11 { + color: #000; + background-color: #ffcc00; } + .td-box--11 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #ffcc00 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--11 p > a, .td-box--11 span > a { + color: #07377f; } + .td-box--11 p > a:hover, .td-box--11 span > a:hover { + color: #052759; } + +.td-box--12 { + color: #fff; + background-color: #667373; } + .td-box--12 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #667373 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--12 p > a, .td-box--12 span > a { + color: #98c1fe; } + .td-box--12 p > a:hover, .td-box--12 span > a:hover { + color: #b7d4fe; } + +.td-box--blue { + color: #fff; + background-color: #0d6efd; } + .td-box--blue .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #0d6efd transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--blue p > a, .td-box--blue span > a { + color: #81b3fe; } + .td-box--blue p > a:hover, .td-box--blue span > a:hover { + color: #a7cafe; } + +.td-box--indigo { + color: #fff; + background-color: #6610f2; } + .td-box--indigo .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #6610f2 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--indigo p > a, .td-box--indigo span > a { + color: #85b6fe; } + .td-box--indigo p > a:hover, .td-box--indigo span > a:hover { + color: #aaccfe; } + 
+.td-box--purple { + color: #fff; + background-color: #6f42c1; } + .td-box--purple .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #6f42c1 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--purple p > a, .td-box--purple span > a { + color: #84b5fe; } + .td-box--purple p > a:hover, .td-box--purple span > a:hover { + color: #a9cbfe; } + +.td-box--pink { + color: #fff; + background-color: #d63384; } + .td-box--pink .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #d63384 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--pink p > a, .td-box--pink span > a { + color: #81b4fe; } + .td-box--pink p > a:hover, .td-box--pink span > a:hover { + color: #a7cbfe; } + +.td-box--red { + color: #fff; + background-color: #dc3545; } + .td-box--red .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #dc3545 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--red p > a, .td-box--red span > a { + color: #7db1fe; } + .td-box--red p > a:hover, .td-box--red span > a:hover { + color: #a4c8fe; } + +.td-box--orange { + color: #000; + background-color: #fd7e14; } + .td-box--orange .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #fd7e14 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--orange p > a, .td-box--orange span > a { + color: #073b87; } + .td-box--orange p > a:hover, .td-box--orange span > a:hover { + color: #05295f; } + +.td-box--yellow { + color: #000; + background-color: #ffc107; } + 
.td-box--yellow .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #ffc107 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--yellow p > a, .td-box--yellow span > a { + color: #073982; } + .td-box--yellow p > a:hover, .td-box--yellow span > a:hover { + color: #05285b; } + +.td-box--green { + color: #fff; + background-color: #198754; } + .td-box--green .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #198754 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--green p > a, .td-box--green span > a { + color: #b3d2fe; } + .td-box--green p > a:hover, .td-box--green span > a:hover { + color: #cae0fe; } + +.td-box--teal { + color: #000; + background-color: #20c997; } + .td-box--teal .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #20c997 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--teal p > a, .td-box--teal span > a { + color: #063274; } + .td-box--teal p > a:hover, .td-box--teal span > a:hover { + color: #042351; } + +.td-box--cyan { + color: #000; + background-color: #0dcaf0; } + .td-box--cyan .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #0dcaf0 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--cyan p > a, .td-box--cyan span > a { + color: #06377e; } + .td-box--cyan p > a:hover, .td-box--cyan span > a:hover { + color: #042758; } + +.td-box--black { + color: #fff; + background-color: #000; } + .td-box--black .td-arrow-down::before { + left: 50%; + margin-left: 
-30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #000 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--black p > a, .td-box--black span > a { + color: white; } + .td-box--black p > a:hover, .td-box--black span > a:hover { + color: white; } + +.td-box--white { + color: #000; + background-color: #fff; } + .td-box--white .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #fff transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--white p > a, .td-box--white span > a { + color: #0d6efd; } + .td-box--white p > a:hover, .td-box--white span > a:hover { + color: #094db1; } + +.td-box--gray { + color: #fff; + background-color: #6c757d; } + .td-box--gray .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #6c757d transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--gray p > a, .td-box--gray span > a { + color: #90bdfe; } + .td-box--gray p > a:hover, .td-box--gray span > a:hover { + color: #b1d1fe; } + +.td-box--gray-dark { + color: #fff; + background-color: #343a40; } + .td-box--gray-dark .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #343a40 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--gray-dark p > a, .td-box--gray-dark span > a { + color: #c8deff; } + .td-box--gray-dark p > a:hover, .td-box--gray-dark span > a:hover { + color: #d9e8ff; } + +.td-box--primary { + color: #000; + background-color: #189DD0; } + .td-box--primary .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + 
border-width: 25px 30px 0 30px; + border-color: #189DD0 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--primary p > a, .td-box--primary span > a { + color: #063273; } + .td-box--primary p > a:hover, .td-box--primary span > a:hover { + color: #042351; } + +.td-box--secondary { + color: #000; + background-color: #ffcc00; } + .td-box--secondary .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #ffcc00 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--secondary p > a, .td-box--secondary span > a { + color: #07377f; } + .td-box--secondary p > a:hover, .td-box--secondary span > a:hover { + color: #052759; } + +.td-box--success { + color: #000; + background-color: #5ca012; } + .td-box--success .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #5ca012 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--success p > a, .td-box--success span > a { + color: #052658; } + .td-box--success p > a:hover, .td-box--success span > a:hover { + color: #041b3e; } + +.td-box--info { + color: #fff; + background-color: #667373; } + .td-box--info .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #667373 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--info p > a, .td-box--info span > a { + color: #98c1fe; } + .td-box--info p > a:hover, .td-box--info span > a:hover { + color: #b7d4fe; } + +.td-box--warning { + color: #000; + background-color: #ed6a5a; } + .td-box--warning .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 
30px 0 30px; + border-color: #ed6a5a transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--warning p > a, .td-box--warning span > a { + color: #0847a2; } + .td-box--warning p > a:hover, .td-box--warning span > a:hover { + color: #063271; } + +.td-box--danger { + color: #000; + background-color: #fe4954; } + .td-box--danger .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #fe4954 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--danger p > a, .td-box--danger span > a { + color: #0847a2; } + .td-box--danger p > a:hover, .td-box--danger span > a:hover { + color: #063271; } + +.td-box--light { + color: #000; + background-color: #d3f3ee; } + .td-box--light .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #d3f3ee transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--light p > a, .td-box--light span > a { + color: #0c62e1; } + .td-box--light p > a:hover, .td-box--light span > a:hover { + color: #08459e; } + +.td-box--dark, .td-footer { + color: #fff; + background-color: #403f4c; } + .td-box--dark .td-arrow-down::before, .td-footer .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #403f4c transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--dark p > a, .td-footer p > a, .td-box--dark span > a, .td-footer span > a { + color: #bdd7fe; } + .td-box--dark p > a:hover, .td-footer p > a:hover, .td-box--dark span > a:hover, .td-footer span > a:hover { + color: #d1e3fe; } + +.td-box--100 { + color: #000; + background-color: #f8f9fa; } + .td-box--100 .td-arrow-down::before { + left: 50%; + 
margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #f8f9fa transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--100 p > a, .td-box--100 span > a { + color: #0d6bf7; } + .td-box--100 p > a:hover, .td-box--100 span > a:hover { + color: #094bad; } + +.td-box--200 { + color: #000; + background-color: #e9ecef; } + .td-box--200 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #e9ecef transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--200 p > a, .td-box--200 span > a { + color: #0c66ea; } + .td-box--200 p > a:hover, .td-box--200 span > a:hover { + color: #0847a4; } + +.td-box--300 { + color: #000; + background-color: #dee2e6; } + .td-box--300 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #dee2e6 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--300 p > a, .td-box--300 span > a { + color: #0c61e0; } + .td-box--300 p > a:hover, .td-box--300 span > a:hover { + color: #08449d; } + +.td-box--400 { + color: #000; + background-color: #ced4da; } + .td-box--400 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #ced4da transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--400 p > a, .td-box--400 span > a { + color: #0b5bd2; } + .td-box--400 p > a:hover, .td-box--400 span > a:hover { + color: #084093; } + +.td-box--500 { + color: #000; + background-color: #adb5bd; } + .td-box--500 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: 
#adb5bd transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--500 p > a, .td-box--500 span > a { + color: #094eb4; } + .td-box--500 p > a:hover, .td-box--500 span > a:hover { + color: #06377e; } + +.td-box--600 { + color: #fff; + background-color: #6c757d; } + .td-box--600 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #6c757d transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--600 p > a, .td-box--600 span > a { + color: #90bdfe; } + .td-box--600 p > a:hover, .td-box--600 span > a:hover { + color: #b1d1fe; } + +.td-box--700 { + color: #fff; + background-color: #495057; } + .td-box--700 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #495057 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--700 p > a, .td-box--700 span > a { + color: #b3d2fe; } + .td-box--700 p > a:hover, .td-box--700 span > a:hover { + color: #cae0fe; } + +.td-box--800 { + color: #fff; + background-color: #343a40; } + .td-box--800 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #343a40 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + +.td-box--800 p > a, .td-box--800 span > a { + color: #c8deff; } + .td-box--800 p > a:hover, .td-box--800 span > a:hover { + color: #d9e8ff; } + +.td-box--900 { + color: #fff; + background-color: #212529; } + .td-box--900 .td-arrow-down::before { + left: 50%; + margin-left: -30px; + bottom: -25px; + border-style: solid; + border-width: 25px 30px 0 30px; + border-color: #212529 transparent transparent transparent; + z-index: 3; + position: absolute; + content: ""; } + 
+.td-box--900 p > a, .td-box--900 span > a { + color: #dceaff; } + .td-box--900 p > a:hover, .td-box--900 span > a:hover { + color: #e7f0ff; } + +[data-bs-theme="dark"] .td-box--white { + color: var(--bs-body-color); + background-color: var(--bs-body-bg); } + [data-bs-theme="dark"] .td-box--white p > a, [data-bs-theme="dark"] .td-box--white span > a { + color: var(--bs-link-color); } + [data-bs-theme="dark"] .td-box--white p > a:focus, [data-bs-theme="dark"] .td-box--white p > a:hover, [data-bs-theme="dark"] .td-box--white span > a:focus, [data-bs-theme="dark"] .td-box--white span > a:hover { + color: rgba(var(--bs-link-color-rgb), var(--bs-link-opacity, 1)); } + [data-bs-theme="dark"] .td-box--white .td-arrow-down::before { + border-color: var(--bs-body-bg) transparent transparent transparent; } + +.td-blog .td-rss-button { + border-radius: 2rem; + float: right; + display: none; } + +.td-blog-posts-list { + margin-top: 1.5rem !important; } + .td-blog-posts-list__item { + display: flex; + align-items: flex-start; + margin-bottom: 1.5rem !important; } + .td-blog-posts-list__item__body { + flex: 1; } + +[data-bs-theme="dark"] { + --td-pre-bg: #1b1f22; } + +.td-content .highlight { + margin: 2rem 0; + padding: 0; + position: relative; } + .td-content .highlight .click-to-copy { + display: block; + text-align: right; } + .td-content .highlight pre { + margin: 0; + padding: 1rem; + border-radius: inherit; } + .td-content .highlight pre button.td-click-to-copy { + position: absolute; + color: var(--bs-tertiary-color); + border-width: 0; + background-color: transparent; + background-image: none; + --bs-btn-box-shadow: 0; + padding: var(--bs-btn-padding-y) calc(var(--bs-btn-padding-x) / 2); + right: 4px; + top: 2px; } + .td-content .highlight pre button.td-click-to-copy:hover { + color: var(--bs-secondary-color); + background-color: var(--bs-dark-bg-subtle); } + .td-content .highlight pre button.td-click-to-copy:active { + color: var(--bs-secondary-color); + 
background-color: var(--bs-dark-bg-subtle); + transform: translateY(2px); } + +.td-content p code, +.td-content li > code, +.td-content table code { + color: inherit; + padding: 0.2em 0.4em; + margin: 0; + font-size: 85%; + word-break: normal; + background-color: var(--td-pre-bg); + border-radius: 0.375rem; } + .td-content p code br, + .td-content li > code br, + .td-content table code br { + display: none; } + +.td-content pre { + word-wrap: normal; + background-color: var(--td-pre-bg); + border: solid var(--bs-border-color); + border-width: 1px; + padding: 1rem; } + .td-content pre > code { + background-color: inherit !important; + padding: 0; + margin: 0; + font-size: 100%; + word-break: normal; + white-space: pre; + border: 0; } + +.td-content pre.mermaid { + background-color: inherit; + font-size: 0; + padding: 0; } + +@media (min-width: 768px) { + .td-navbar-cover { + background: transparent !important; } + .td-navbar-cover .nav-link { + text-shadow: 1px 1px 2px #403f4c; } } + +.td-navbar-cover.navbar-bg-onscroll .nav-link { + text-shadow: none; } + +.navbar-bg-onscroll { + background: #189DD0 !important; + opacity: inherit; } + +.td-navbar { + background: #189DD0; + min-height: 4rem; + margin: 0; + z-index: 32; } + .td-navbar .navbar-brand { + text-transform: none; } + .td-navbar .navbar-brand__name { + font-weight: 700; } + .td-navbar .navbar-brand svg { + display: inline-block; + margin: 0 10px; + height: 30px; } + .td-navbar .navbar-nav { + padding-top: 0.5rem; + white-space: nowrap; } + .td-navbar .nav-link { + text-transform: none; + font-weight: 700; } + .td-navbar .dropdown { + min-width: 50px; } + @media (min-width: 768px) { + .td-navbar { + position: fixed; + top: 0; + width: 100%; } + .td-navbar .nav-item { + padding-inline-end: 0.5rem; } + .td-navbar .navbar-nav { + padding-top: 0 !important; } } + @media (max-width: 991.98px) { + .td-navbar .td-navbar-nav-scroll { + max-width: 100%; + height: 2.5rem; + overflow: hidden; + font-size: 0.9rem; } + 
.td-navbar .navbar-brand { + margin-right: 0; } + .td-navbar .navbar-nav { + padding-bottom: 2rem; + overflow-x: auto; } } + .td-navbar .td-light-dark-menu .bi { + width: 1em; + height: 1em; + vertical-align: -.125em; + fill: currentcolor; } + @media (max-width: 991.98px) { + .td-navbar .td-light-dark-menu.dropdown { + position: unset; } } +#main_navbar li i { + padding-right: 0.5em; } + #main_navbar li i:before { + display: inline-block; + text-align: center; + min-width: 1em; } + +#main_navbar .alert { + background-color: inherit; + padding: 0; + color: #ffcc00; + border: 0; + font-weight: inherit; } + #main_navbar .alert:before { + display: inline-block; + font-style: normal; + font-variant: normal; + text-rendering: auto; + -webkit-font-smoothing: antialiased; + font-family: "Font Awesome 6 Free"; + font-weight: 900; + content: "\f0d9"; + padding-left: 0.5em; + padding-right: 0.5em; } + +nav.foldable-nav#td-section-nav { + position: relative; } + +nav.foldable-nav#td-section-nav label { + margin-bottom: 0; + width: 100%; } + +nav.foldable-nav .td-sidebar-nav__section, +nav.foldable-nav .with-child ul { + list-style: none; + padding: 0; + margin: 0; } + +nav.foldable-nav .ul-1 > li { + padding-left: 1.5em; } + +nav.foldable-nav ul.foldable { + display: none; } + +nav.foldable-nav input:checked ~ ul.foldable { + display: block; } + +nav.foldable-nav input[type="checkbox"] { + display: none; } + +nav.foldable-nav .with-child, +nav.foldable-nav .without-child { + position: relative; + padding-left: 1.5em; } + +nav.foldable-nav .ul-1 .with-child > label:before { + display: inline-block; + font-style: normal; + font-variant: normal; + text-rendering: auto; + -webkit-font-smoothing: antialiased; + font-family: "Font Awesome 6 Free"; + font-weight: 900; + content: "\f0da"; + position: absolute; + left: 0.1em; + padding-left: 0.4em; + padding-right: 0.4em; + font-size: 1em; + color: var(--bs-secondary-color); + transition: all 0.5s; } + nav.foldable-nav .ul-1 
.with-child > label:before:hover { + transform: rotate(90deg); } + +nav.foldable-nav .ul-1 .with-child > input:checked ~ label:before { + color: var(--bs-secondary-color); + transform: rotate(90deg); + transition: transform 0.5s; } + +nav.foldable-nav .with-child ul { + margin-top: 0.1em; } + +@media (hover: hover) and (pointer: fine) { + nav.foldable-nav .ul-1 .with-child > label:hover:before { + color: var(--bs-link-color); + transition: color 0.3s; } + nav.foldable-nav .ul-1 .with-child > input:checked ~ label:hover:before { + color: var(--bs-link-color); + transition: color 0.3s; } } + +.td-sidebar-nav { + padding-right: 0.5rem; + margin-right: -15px; + margin-left: -15px; } + @media (min-width: 768px) { + @supports (position: sticky) { + .td-sidebar-nav { + max-height: calc(100vh - 8.5rem); + overflow-y: auto; } } } + @media (min-width: 992px) { + .td-sidebar-nav.td-sidebar-nav--search-disabled { + padding-top: 1rem; } + @supports (position: sticky) { + .td-sidebar-nav.td-sidebar-nav--search-disabled { + max-height: calc(calc(100vh - 8.5rem) + 4.5rem); } } } + @media (min-width: 768px) { + .td-sidebar-nav { + display: block !important; } } + .td-sidebar-nav__section { + padding-left: 0; } + .td-sidebar-nav__section li { + list-style: none; } + .td-sidebar-nav__section.ul-0, .td-sidebar-nav__section ul { + padding: 0; + margin: 0; } + @media (min-width: 768px) { + .td-sidebar-nav__section .ul-1 ul { + padding-left: 1.5em; } } + .td-sidebar-nav__section-title { + display: block; + font-weight: 500; } + .td-sidebar-nav__section-title .active { + font-weight: 700; } + .td-sidebar-nav__section-title a { + color: var(--bs-secondary-color); } + .td-sidebar-nav .td-sidebar-link { + display: block; + padding-bottom: 0.375rem; } + .td-sidebar-nav .td-sidebar-link__page { + color: var(--bs-body-color); + font-weight: 300; } + .td-sidebar-nav a:focus, .td-sidebar-nav a:hover { + color: var(--bs-link-color); } + .td-sidebar-nav a.active { + font-weight: 700; } + 
.td-sidebar-nav .dropdown a { + color: var(--bs-tertiary-color); } + .td-sidebar-nav .dropdown .nav-link { + padding: 0 0 1rem; } + .td-sidebar-nav > .td-sidebar-nav__section { + padding-left: 1.5rem; } + .td-sidebar-nav li i { + padding-right: 0.5em; } + .td-sidebar-nav li i:before { + display: inline-block; + text-align: center; + min-width: 1em; } + .td-sidebar-nav .td-sidebar-link.tree-root { + font-weight: 700; + border-bottom: 1px solid var(--bs-tertiary-color); + margin-bottom: 1rem; } + +.td-sidebar { + padding-bottom: 1rem; } + .td-sidebar a { + text-decoration: none; } + .td-sidebar a:focus, .td-sidebar a:hover { + text-decoration: initial; } + .td-sidebar .btn-link { + text-decoration: none; } + @media (min-width: 768px) { + .td-sidebar { + padding-top: 4rem; + background-color: var(--bs-body-tertiary-bg); + padding-right: 1rem; + border-right: 1px solid var(--bs-border-color); } } + .td-sidebar__toggle { + line-height: 1; + color: var(--bs-body-color); + margin: 1rem; } + .td-sidebar__search { + padding: 1rem 0; } + .td-sidebar__inner { + order: 0; } + @media (min-width: 768px) { + @supports (position: sticky) { + .td-sidebar__inner { + position: sticky; + top: 4rem; + z-index: 10; + height: calc(100vh - 5rem); } } } + @media (min-width: 1200px) { + .td-sidebar__inner { + flex: 0 1 320px; } } + .td-sidebar__inner .td-search-box { + width: 100%; } + .td-sidebar #content-desktop { + display: block; } + .td-sidebar #content-mobile { + display: none; } + @media (max-width: 991.98px) { + .td-sidebar #content-desktop { + display: none; } + .td-sidebar #content-mobile { + display: block; } } +.td-sidebar-toc { + border-left: 1px solid var(--bs-border-color); + order: 2; + padding-top: 0.75rem; + padding-bottom: 1.5rem; + vertical-align: top; } + .td-sidebar-toc a { + text-decoration: none; } + .td-sidebar-toc a:focus, .td-sidebar-toc a:hover { + text-decoration: initial; } + .td-sidebar-toc .btn-link { + text-decoration: none; } + @supports (position: sticky) 
{ + .td-sidebar-toc { + position: sticky; + top: 4rem; + height: calc(100vh - 4rem); + overflow-y: auto; } } + .td-sidebar-toc .td-page-meta a { + display: block; + font-weight: 500; } + +.td-toc a { + display: block; + font-weight: 300; + padding-bottom: 0.25rem; } + +.td-toc li { + list-style: none; + display: block; } + +.td-toc li li { + margin-left: 0.5rem; } + +.td-toc #TableOfContents a { + color: var(--bs-secondary-color); } + .td-toc #TableOfContents a:focus, .td-toc #TableOfContents a:hover { + color: initial; } + +.td-toc ul { + padding-left: 0; } + +@media print { + .td-breadcrumbs { + display: none !important; } } + +.td-breadcrumbs .breadcrumb { + background: inherit; + padding-left: 0; + padding-top: 0; } + +.alert { + font-weight: 500; + color: inherit; + border-radius: 0; } + .alert-primary, .pageinfo-primary { + border-style: solid; + border-color: #189DD0; + border-width: 0 0 0 4px; } + .alert-primary .alert-heading, .pageinfo-primary .alert-heading { + color: #189DD0; } + .alert-secondary, .pageinfo-secondary { + border-style: solid; + border-color: #ffcc00; + border-width: 0 0 0 4px; } + .alert-secondary .alert-heading, .pageinfo-secondary .alert-heading { + color: #ffcc00; } + .alert-success, .pageinfo-success { + border-style: solid; + border-color: #5ca012; + border-width: 0 0 0 4px; } + .alert-success .alert-heading, .pageinfo-success .alert-heading { + color: #5ca012; } + .alert-info, .pageinfo-info { + border-style: solid; + border-color: #667373; + border-width: 0 0 0 4px; } + .alert-info .alert-heading, .pageinfo-info .alert-heading { + color: #667373; } + .alert-warning, .pageinfo-warning { + border-style: solid; + border-color: #ed6a5a; + border-width: 0 0 0 4px; } + .alert-warning .alert-heading, .pageinfo-warning .alert-heading { + color: #ed6a5a; } + .alert-danger, .pageinfo-danger { + border-style: solid; + border-color: #fe4954; + border-width: 0 0 0 4px; } + .alert-danger .alert-heading, .pageinfo-danger .alert-heading { + 
color: #fe4954; } + .alert-light, .pageinfo-light { + border-style: solid; + border-color: #d3f3ee; + border-width: 0 0 0 4px; } + .alert-light .alert-heading, .pageinfo-light .alert-heading { + color: #d3f3ee; } + .alert-dark, .pageinfo-dark { + border-style: solid; + border-color: #403f4c; + border-width: 0 0 0 4px; } + .alert-dark .alert-heading, .pageinfo-dark .alert-heading { + color: #403f4c; } + +/* + +Styles to override the theme. + +*/ +.td-navbar { + max-width: 1280px; + background: #132f48; } + +.td-navbar .navbar-brand svg { + margin: -8px 10px 0; } + +li.nav-item { + margin-bottom: 0; } + +.navbar-dark .navbar-nav .nav-link, +.navbar-dark .navbar-nav .nav-link:hover, +.navbar-dark .navbar-nav .nav-link:focus { + color: #fff; + line-height: 65px; + margin-bottom: 0; + padding: 0 10px; } + +.dropdown-toggle::after { + position: relative; + /* top: 3pt; Uncomment this to lower the icons as requested in comments*/ + content: ""; + display: inline-block; + /* By using an em scale, the arrows will size with the font */ + width: 0.5em; + height: 0.5em; + border-top: 0 none; + border-left: 0 none; + border-right: 2px solid #fff; + border-bottom: 2px solid #fff; + transform: rotate(45deg); + margin-left: 0.5rem; } + +.dropdown-menu { + position: absolute; + top: 100%; + left: 0; + z-index: 1000; + display: none; + float: left; + min-width: 16em; + padding: 0 0 1rem; + margin: 0; + font-size: 1rem; + color: #fff; + text-align: left; + list-style: none; + background-color: rgba(19, 47, 72, 0.9); + background-clip: padding-box; + border: 0px solid transparent; + border-radius: 0; + box-shadow: none; + backdrop-filter: blur(4px); } + +.dropdown-item, +.dropdown-item:hover, +.dropdown-item:focus { + display: block; + width: 100%; + padding: 0.5rem 1.5rem; + clear: both; + font-weight: 400; + color: #fff; + text-align: inherit; + white-space: nowrap; + background-color: transparent; + border: 0; } + +.dropdown-item:hover, +.dropdown-item:focus { + text-decoration: 
underline; + background: transparent; } + +.dropdown-menu[data-bs-popper] { + margin-top: 0; } + +@media (max-width: 992px) { + .navbar-nav .nav-item { + display: none; } } + +.header-social-wrap a { + text-decoration: none; } + +@media (min-width: 992px) { + body > header { + position: fixed; + top: 0; + width: 100%; + background: rgba(19, 47, 72, 0.9); + z-index: 1000; + min-height: 65px; + backdrop-filter: blur(4px); } + .td-navbar { + position: relative; + margin: 0 auto; + padding-left: 5px; + padding-right: 5px; + background: transparent; + min-height: 65px; + padding-top: 0; + padding-bottom: 0; } + .td-navbar-nav-scroll { + width: 100%; } + .td-navbar .navbar-brand svg { + width: 30px; + margin: -8px 10px 0 0; + height: auto; } + .td-navbar .navbar-brand span.font-weight-bold { + display: inline-block; + vertical-align: 1px; + font-size: 18px; } + .td-sidebar { + padding-top: 75px; + background-color: #e9ecf0; + padding-right: 1rem; + border-right: 1px solid #dee2e6; } + .td-sidebar-toc { + border-left: 1px solid #dee2e6; + order: 2; + padding-top: 5px; + padding-bottom: 1.5rem; + vertical-align: top; } + @supports (position: sticky) { + .td-sidebar-toc { + position: sticky; + top: 75px; + height: calc(100vh - 85px); + overflow-y: auto; } } + .header-social-wrap { + height: 65px; + display: flex; + margin-left: auto; + align-items: center; + margin-bottom: 0; } } + +footer.bg-dark { + background: #132f48 !important; } + +img { + max-width: 100%; + height: auto; } + +button { + border-radius: 50px; + background: #fe4954; + color: #fff; + padding-left: 20px; + padding-right: 20px; + border-width: 0; } + +.lead { + margin-bottom: 1em; + padding-bottom: 1em; + border-bottom: 1px solid #dee2e6; + color: rgba(85, 85, 85, 0.5); + font-weight: 300; } + +h1, .h1 { + font-size: 2.5rem; + font-weight: 300; + color: #667373; } + @media (min-width: 768px) { + h1, .h1 { + font-size: 3rem; } } +h2, .h2, h3, .h3, .td-footer__links-item { + margin-top: 1.5em; + font-weight: 
700; + color: #667373; } + +li { + margin-bottom: 0.5rem; } + +table { + border-radius: 6px; + margin-bottom: 2em; + overflow: hidden; } + +td, th { + padding: 0.5rem; } + +th { + background: #667373; + color: #fff; } + +tr:nth-child(odd) { + background: #e9ecf0; } + +tr:nth-child(even) { + background-color: rgba(233, 236, 240, 0.5); } + +.feedback--title { + border-top: 1px solid #dee2e6; + padding-top: 1em; } + +.feedback--answer { + width: 4em; } + +@media (min-width: 768px) { + .td-sidebar-nav { + min-height: 100%; } } + +#m-upgrade_ebook, +#m-join_slack, +#m-clickhouse_training { + font-weight: bold; + color: #189DD0; + padding-left: 20px !important; + font-size: 15px; } + +#m-upgrade_ebook:hover span, +#m-join_slack:hover span, +#m-clickhouse_training:hover span { + text-decoration: underline; } + +#m-clickhouse_training { + background: url('data:image/svg+xml,') left 3px no-repeat transparent; + background-size: 17px; } + +#m-contact_us { + background: url('data:image/svg+xml,') left 3px no-repeat transparent; + background-size: 17px; } + +#m-join_slack { + background: url('data:image/svg+xml,') left 3px no-repeat transparent; + background-size: 17px; } + +#m-upgrade_ebook { + background: url('data:image/svg+xml,') left 3px no-repeat transparent; + background-size: 17px; } + +#m-join_slack-li { + padding-top: 20px; + border-top: 1px #189DD0 solid; + margin-top: 20px; } + +footer { + min-height: auto !important; + color: #fff; } + +footer a, footer a:hover, footer a:active { + color: #fff; } + +footer .nav li { + font-size: 14px; + line-height: 1.8; } + +footer i.fab.fa-twitter:before { + content: ' '; + width: 24px; + height: 24px; + display: inline-block; + background: url('data:image/svg+xml,') center bottom no-repeat transparent; + background-size: contain; + vertical-align: -3px; } + +footer .footer-inner { + max-width: 1280px; + margin: 0 auto; } + +.td-search { + background: transparent; + position: relative; + width: 100%; } + .td-search__icon { + 
display: flex; + align-items: center; + height: 100%; + position: absolute; + left: 0.75em; + pointer-events: none; } + .td-search__icon:before { + content: "\f002"; } + .td-navbar .td-search__icon { + color: inherit; } + .td-search__input { + width: 100%; + text-indent: 1.25em; } + .td-search__input:not(:focus) { + background: transparent; } + .td-search__input.form-control:focus { + border-color: #f3fafd; + box-shadow: 0 0 0 2px #74c4e3; + color: var(--bs-body-color); } + .td-navbar .td-search__input { + border: none; + color: inherit; } + .td-navbar .td-search__input::-webkit-input-placeholder { + color: inherit; } + .td-navbar .td-search__input:-moz-placeholder { + color: inherit; } + .td-navbar .td-search__input::-moz-placeholder { + color: inherit; } + .td-navbar .td-search__input:-ms-input-placeholder { + color: inherit; } + .td-search:focus-within .td-search__icon { + display: none; } + .td-search:focus-within .td-search-input { + text-indent: 0px; } + .td-search:not(:focus-within) { + color: var(--bs-secondary-color); } + +.td-sidebar .td-search--algolia { + display: block; + padding: 0 0.5rem; } + .td-sidebar .td-search--algolia > button { + margin: 0; + width: 100%; } + +.td-search--offline:focus-within .td-search__icon { + display: flex; + color: var(--bs-secondary-color); } + +.td-offline-search-results { + max-width: 90%; } + .td-offline-search-results .card { + margin-bottom: 0.5rem; } + .td-offline-search-results .card .card-header { + font-weight: bold; } + .td-offline-search-results__close-button { + float: right; } + .td-offline-search-results__close-button:after { + content: "\f00d"; } + +.td-outer { + display: flex; + flex-direction: column; + min-height: 100vh; } + +@media (min-width: 768px) { + .td-default main > section:first-of-type { + padding-top: 8rem; } } + +.td-main { + flex-grow: 1; } + +.td-404 main, +.td-main main { + padding-top: 1.5rem; + padding-bottom: 2rem; } + @media (min-width: 768px) { + .td-404 main, + .td-main main { + 
padding-top: 5.5rem; } } +.td-cover-block--height-min { + min-height: 300px; } + +.td-cover-block--height-med { + min-height: 400px; } + +.td-cover-block--height-max { + min-height: 500px; } + +.td-cover-block--height-full { + min-height: 100vh; } + +@media (min-width: 768px) { + .td-cover-block--height-min { + min-height: 450px; } + .td-cover-block--height-med { + min-height: 500px; } + .td-cover-block--height-max { + min-height: 650px; } } + +.td-cover-logo { + margin-right: 0.5em; } + +.td-cover-block { + position: relative; + padding-top: 5rem; + padding-bottom: 5rem; + background-repeat: no-repeat; + background-position: 50% 0; + background-size: cover; } + .td-cover-block > .byline { + position: absolute; + bottom: 2px; + right: 4px; } + +.td-bg-arrow-wrapper { + position: relative; } + +.section-index .entry { + padding: 0.75rem; } + +.section-index h5, .section-index .h5 { + margin-bottom: 0; } + .section-index h5 a, .section-index .h5 a { + font-weight: 700; } + +.section-index p { + margin-top: 0; } + +.pageinfo { + font-weight: 500; + background: var(--bs-alert-bg); + color: inherit; + margin: 2rem auto; + padding: 1.5rem; + padding-bottom: 0.5rem; } + .pageinfo-primary { + border-width: 0; } + .pageinfo-secondary { + border-width: 0; } + .pageinfo-success { + border-width: 0; } + .pageinfo-info { + border-width: 0; } + .pageinfo-warning { + border-width: 0; } + .pageinfo-danger { + border-width: 0; } + .pageinfo-light { + border-width: 0; } + .pageinfo-dark { + border-width: 0; } + +.td-page-meta__lastmod { + margin-top: 3rem !important; + padding-top: 1rem !important; } + +.taxonomy-terms-article { + width: 100%; + clear: both; + font-size: 0.8rem; } + .taxonomy-terms-article .taxonomy-title { + display: inline; + font-size: 1.25em; + height: 1em; + line-height: 1em; + margin-right: 0.5em; + padding: 0; } + +.taxonomy-terms-cloud { + width: 100%; + clear: both; + font-size: 0.8rem; } + .taxonomy-terms-cloud .taxonomy-title { + display: inline-block; + 
width: 100%; + font-size: 1rem; + font-weight: 700; + color: var(--bs-primary-text-emphasis); + border-bottom: 1px solid var(--bs-tertiary-color); + margin-bottom: 1em; + padding-bottom: 0.375rem; + margin-top: 1em; } + +.taxonomy-terms-page { + max-width: 800px; + margin: auto; } + .taxonomy-terms-page h1, .taxonomy-terms-page .h1 { + margin-bottom: 1em; } + .taxonomy-terms-page .taxonomy-terms-cloud { + font-size: 1em; } + .taxonomy-terms-page .taxonomy-terms-cloud li { + display: block; } + .taxonomy-terms-page .taxo-text-tags li + li::before { + content: none; } + .taxonomy-terms-page .taxo-fruits .taxonomy-count, + .taxonomy-terms-page .taxo-fruits .taxonomy-label { + display: inherit; + font-size: 1rem; + margin: 0; + padding: 0; + padding-right: 0.5em; } + .taxonomy-terms-page .taxo-fruits .taxonomy-count::before { + content: "("; } + .taxonomy-terms-page .taxo-fruits .taxonomy-count::after { + content: ")"; } + +.taxonomy-terms { + list-style: none; + margin: 0; + overflow: hidden; + padding: 0; + display: inline; } + .taxonomy-terms li { + display: inline; + overflow-wrap: break-word; + word-wrap: break-word; + -ms-word-break: break-all; + word-break: break-all; + word-break: break-word; + -ms-hyphens: auto; + -moz-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; } + +.taxonomy-count { + font-size: 0.8em; + line-height: 1.25em; + display: inline-block; + padding-left: 0.6em; + padding-right: 0.6em; + margin-left: 0.6em; + text-align: center; + border-radius: 1em; + background-color: var(--bs-body-bg); } + +.taxonomy-term { + background: var(--bs-secondary-bg); + border-width: 0; + border-radius: 0 3px 3px 0; + color: var(--bs-body-color); + display: inline-block; + font-size: 1em; + line-height: 1.5em; + min-height: 1.5em; + max-width: 100%; + padding: 0 0.5em 0 1em; + position: relative; + margin: 0 0.5em 0.2em 0; + text-decoration: none; + -webkit-transition: color 0.2s; + -webkit-clip-path: polygon(100% 0, 100% 100%, 0.8em 100%, 0 50%, 0.8em 0); 
+ clip-path: polygon(100% 0, 100% 100%, 0.8em 100%, 0 50%, 0.8em 0); } + .taxonomy-term:hover { + background-color: var(--bs-primary-bg-subtle); + color: var(--bs-body-color-dark); } + .taxonomy-term:hover .taxonomy-count { + color: var(--bs-body-color-dark); } + .taxonomy-term:hover::before { + background: #189DD0; } + +.taxo-text-tags .taxonomy-term { + background: none; + border-width: 0; + border-radius: 0; + color: #6c757d; + font-size: 1em; + line-height: 1.5em; + min-height: 1.5em; + max-width: 100%; + padding: 0; + position: relative; + margin: 0; + text-decoration: none; + -webkit-clip-path: none; + clip-path: none; } + .taxo-text-tags .taxonomy-term:hover { + background: none; + color: #0d6efd; } + .taxo-text-tags .taxonomy-term:hover .taxonomy-count { + color: #403f4c !important; } + .taxo-text-tags .taxonomy-term:hover::before { + background: none; } + +.taxo-text-tags li + li::before { + content: "|"; + color: #6c757d; + margin-right: 0.2em; } + +.taxo-text-tags .taxonomy-count { + font-size: 1em; + line-height: 1.25em; + display: inline-block; + padding: 0; + margin: 0; + text-align: center; + border-radius: 0; + background: none; + vertical-align: super; + font-size: 0.75em; } + +.taxo-text-tags .taxonomy-term:hover .taxonomy-count { + color: #0d6efd !important; } + +.taxo-fruits .taxonomy-term[data-taxonomy-term]::before { + font-style: normal; + font-variant: normal; + text-rendering: auto; + -webkit-font-smoothing: antialiased; + font-family: "Font Awesome 6 Free"; + padding-right: 0.5em; + font-size: 2em; + min-width: 1.5em; + display: inline-block; } + +.taxo-fruits .taxonomy-term[data-taxonomy-term="apple"]::before { + content: "\f5d1"; + color: red; } + +.taxo-fruits .taxonomy-term[data-taxonomy-term="carrot"]::before { + content: "\f787"; + color: orange; } + +.taxo-fruits .taxonomy-term[data-taxonomy-term="lemon"]::before { + content: "\f094"; + color: limegreen; } + +.taxo-fruits .taxonomy-term[data-taxonomy-term="pepper"]::before { + 
content: "\f816"; + color: darkred; } + +.taxo-fruits .taxonomy-term { + background: none; + border-width: 0; + border-radius: 0; + color: #6c757d; + font-size: 1em; + line-height: 2.5em; + max-width: 100%; + padding: 0; + position: relative; + margin: 0; + text-decoration: none; + -webkit-clip-path: none; + clip-path: none; } + .taxo-fruits .taxonomy-term:hover { + background: none; + color: #0d6efd; } + .taxo-fruits .taxonomy-term:hover .taxonomy-count { + color: #403f4c !important; } + .taxo-fruits .taxonomy-term:hover::before { + background: none; + text-shadow: 0 0 3px #212529; } + +.taxo-fruits .taxonomy-count, +.taxo-fruits .taxonomy-label { + display: none; } + +.taxo-fruits.taxonomy-terms-article { + margin-bottom: 1rem; } + .taxo-fruits.taxonomy-terms-article .taxonomy-title { + display: none; } + +.taxonomy-taxonomy-page { + max-width: 800px; + margin: auto; } + .taxonomy-taxonomy-page h1, .taxonomy-taxonomy-page .h1 { + margin-bottom: 1em; } + +.article-meta { + margin-bottom: 1.5rem; } + +.article-teaser.article-type-docs h3 a:before, .article-teaser.article-type-docs .h3 a:before, .article-teaser.article-type-docs .td-footer__links-item a:before { + display: inline-block; + font-style: normal; + font-variant: normal; + text-rendering: auto; + -webkit-font-smoothing: antialiased; + font-family: "Font Awesome 6 Free"; + content: "\f02d"; + padding-right: 0.5em; } + +.article-teaser.article-type-blog h3 a:before, .article-teaser.article-type-blog .h3 a:before, .article-teaser.article-type-blog .td-footer__links-item a:before { + display: inline-block; + font-style: normal; + font-variant: normal; + text-rendering: auto; + -webkit-font-smoothing: antialiased; + font-family: "Font Awesome 6 Free"; + content: "\f781"; + padding-right: 0.5em; } + +.all-taxonomy-terms { + font-weight: 500; + line-height: 1.2; + font-size: 1.5rem; } + .all-taxonomy-terms:before { + display: inline-block; + font-style: normal; + font-variant: normal; + text-rendering: auto; + 
-webkit-font-smoothing: antialiased; + font-family: "Font Awesome 6 Free"; + content: "\f122"; + padding-right: 0.5em; } + +.article-teaser.card { + padding: 1em; + margin-bottom: 1.5em; } + +.article-teaser .breadcrumb { + margin-bottom: 0em; + font-size: 0.85rem; } + +.article-teaser .article-meta { + margin-bottom: 0em; } + +div.drawio { + display: inline-block; + position: relative; } + div.drawio button { + position: absolute; + bottom: 5px; + right: 5px; + padding: 0.4em 0.5em; + font-size: 0.8em; + display: none; } + div.drawio:hover button { + display: inline; } + +div.drawioframe { + position: fixed; + height: 100%; + width: 100%; + top: 0; + left: 0px; + z-index: 1000; + background: #000b; + border: 0; } + div.drawioframe iframe { + position: absolute; + height: 90%; + width: 90%; + top: 5%; + left: 5%; + z-index: 1010; } + +.tab-content .tab-pane { + margin-top: 0rem; + margin-bottom: 1.5rem; + border-left: var(--bs-border-width) solid var(--bs-border-color); + border-right: var(--bs-border-width) solid var(--bs-border-color); + border-bottom: var(--bs-border-width) solid var(--bs-border-color); } + .tab-content .tab-pane .highlight { + margin: 0; + border: none; + max-width: 100%; } + +.tab-body { + font-weight: 500; + background: var(--td-pre-bg); + color: var(--bs-body-color); + border-radius: 0; + padding: 1.5rem; } + .tab-body > :last-child { + margin-bottom: 0; } + .tab-body > .highlight:only-child { + margin: -1.5rem; + max-width: calc(100% + 3rem); } + .tab-body-primary { + border-style: solid; + border-color: #189DD0; } + .tab-body-secondary { + border-style: solid; + border-color: #ffcc00; } + .tab-body-success { + border-style: solid; + border-color: #5ca012; } + .tab-body-info { + border-style: solid; + border-color: #667373; } + .tab-body-warning { + border-style: solid; + border-color: #ed6a5a; } + .tab-body-danger { + border-style: solid; + border-color: #fe4954; } + .tab-body-light { + border-style: solid; + border-color: #d3f3ee; } + 
.tab-body-dark { + border-style: solid; + border-color: #403f4c; } + +.td-card.card .highlight { + border: none; + margin: 0; } + +.td-card .card-header.code { + background-color: var(--bs-body-bg); } + +.td-card .card-body.code { + background-color: var(--bs-body-bg); + padding: 0 0 0 1ex; } + +.td-card .card-body pre { + margin: 0; + padding: 0 1rem 1rem 1rem; } + +.swagger-ui .info .title small pre, .swagger-ui .info .title .small pre, .swagger-ui .info .title .td-footer__center pre, .swagger-ui .info .title .td-cover-block > .byline pre { + background: #7d8492; } + +.td-footer { + min-height: 150px; + padding-top: 3rem; + /* &__left { } */ } + @media (max-width: 991.98px) { + .td-footer { + min-height: 200px; } } + .td-footer__center { + text-align: center; } + .td-footer__right { + text-align: right; } + .td-footer__about { + font-size: initial; } + .td-footer__links-list { + margin-bottom: 0; } + .td-footer__links-item a { + color: inherit !important; } + .td-footer__authors, .td-footer__all_rights_reserved { + padding-left: 0.25rem; } + .td-footer__all_rights_reserved { + display: none; } + +@media (min-width: 768px) { + .td-offset-anchor:target { + display: block; + position: relative; + top: -4rem; + visibility: hidden; } + h2[id]:before, [id].h2:before, + h3[id]:before, + [id].h3:before, + [id].td-footer__links-item:before, + h4[id]:before, + [id].h4:before, + h5[id]:before, + [id].h5:before { + display: block; + content: " "; + margin-top: -5rem; + height: 5rem; + visibility: hidden; } } + +/* + +Nothing defined here. 
The Hugo project that uses this theme can override Bootstrap by adding a file to: + +assets/scss/_styles_project.scss + +*/ + +/*# sourceMappingURL=main.css.map */ \ No newline at end of file diff --git a/resources/_gen/assets/scss/main.scss_3f90599f3717b4a4920df16fdcadce3d.json b/resources/_gen/assets/scss/main.scss_3f90599f3717b4a4920df16fdcadce3d.json new file mode 100644 index 0000000000..a8ece4a3aa --- /dev/null +++ b/resources/_gen/assets/scss/main.scss_3f90599f3717b4a4920df16fdcadce3d.json @@ -0,0 +1 @@ +{"Target":"/scss/main.css","MediaType":"text/css","Data":{}} \ No newline at end of file diff --git a/signatures/version1/cla.json b/signatures/version1/cla.json index 6e7aafc52f..ac49233a46 100644 --- a/signatures/version1/cla.json +++ b/signatures/version1/cla.json @@ -95,6 +95,198 @@ "created_at": "2022-08-19T11:40:03Z", "repoId": 358618261, "pullRequestNo": 43 + }, + { + "name": "zifter", + "id": 1105370, + "comment_id": 1248605638, + "created_at": "2022-09-15T20:43:54Z", + "repoId": 358618261, + "pullRequestNo": 44 + }, + { + "name": "antoniovizuete", + "id": 1465370, + "comment_id": 1263796309, + "created_at": "2022-09-30T16:46:42Z", + "repoId": 358618261, + "pullRequestNo": 45 + }, + { + "name": "likaobear", + "id": 18431302, + "comment_id": 1285237155, + "created_at": "2022-10-20T09:41:35Z", + "repoId": 358618261, + "pullRequestNo": 47 + }, + { + "name": "mrkoloev", + "id": 42695638, + "comment_id": 1330433522, + "created_at": "2022-11-29T10:45:36Z", + "repoId": 358618261, + "pullRequestNo": 51 + }, + { + "name": "ntbosscher", + "id": 2487143, + "comment_id": 1472108030, + "created_at": "2023-03-16T14:39:42Z", + "repoId": 358618261, + "pullRequestNo": 55 + }, + { + "name": "Slach", + "id": 105560, + "comment_id": 1478096672, + "created_at": "2023-03-21T15:55:47Z", + "repoId": 358618261, + "pullRequestNo": 56 + }, + { + "name": "EpicStep", + "id": 36516357, + "comment_id": 1712845946, + "created_at": "2023-09-10T15:40:54Z", + "repoId": 358618261, + 
"pullRequestNo": 58 + }, + { + "name": "hdhoang", + "id": 12537, + "comment_id": 1853215762, + "created_at": "2023-12-13T03:37:45Z", + "repoId": 358618261, + "pullRequestNo": 64 + }, + { + "name": "DougTidwell", + "id": 2179651, + "comment_id": 1922083095, + "created_at": "2024-02-01T19:34:15Z", + "repoId": 358618261, + "pullRequestNo": 67 + }, + { + "name": "hilakashai", + "id": 73284641, + "comment_id": 1931429596, + "created_at": "2024-02-07T07:23:21Z", + "repoId": 358618261, + "pullRequestNo": 68 + }, + { + "name": "good-vi", + "id": 13942538, + "comment_id": 1931725010, + "created_at": "2024-02-07T10:20:58Z", + "repoId": 358618261, + "pullRequestNo": 69 + }, + { + "name": "mcknightd", + "id": 5289473, + "comment_id": 2091218839, + "created_at": "2024-05-02T18:21:15Z", + "repoId": 358618261, + "pullRequestNo": 77 + }, + { + "name": "ashwini-ahire7", + "id": 124853365, + "comment_id": 2139095012, + "created_at": "2024-05-30T09:03:38Z", + "repoId": 358618261, + "pullRequestNo": 81 + }, + { + "name": "realyota", + "id": 5066060, + "comment_id": 2160783994, + "created_at": "2024-06-11T13:34:03Z", + "repoId": 358618261, + "pullRequestNo": 84 + }, + { + "name": "cw5121", + "id": 33202011, + "comment_id": 2180835399, + "created_at": "2024-06-20T14:19:55Z", + "repoId": 358618261, + "pullRequestNo": 85 + }, + { + "name": "ondrej-smola", + "id": 851390, + "comment_id": 2236012751, + "created_at": "2024-07-18T09:10:02Z", + "repoId": 358618261, + "pullRequestNo": 88 + }, + { + "name": "yukha-dw", + "id": 91457362, + "comment_id": 2241906525, + "created_at": "2024-07-22T02:02:49Z", + "repoId": 358618261, + "pullRequestNo": 89 + }, + { + "name": "xc0derx", + "id": 11428624, + "comment_id": 2251412685, + "created_at": "2024-07-25T21:17:57Z", + "repoId": 358618261, + "pullRequestNo": 95 + }, + { + "name": "zwy991114", + "id": 58408909, + "comment_id": 2503833352, + "created_at": "2024-11-27T13:06:26Z", + "repoId": 358618261, + "pullRequestNo": 115 + }, + { + "name": 
"sphoortific", + "id": 94859985, + "comment_id": 2520482002, + "created_at": "2024-12-05T14:30:10Z", + "repoId": 358618261, + "pullRequestNo": 116 + }, + { + "name": "CaptTofu", + "id": 106670, + "comment_id": 2815621154, + "created_at": "2025-04-18T15:12:30Z", + "repoId": 358618261, + "pullRequestNo": 134 + }, + { + "name": "mkmkme", + "id": 4062234, + "comment_id": 2995993390, + "created_at": "2025-06-23T11:09:03Z", + "repoId": 358618261, + "pullRequestNo": 139 + }, + { + "name": "klaporte", + "id": 8545167, + "comment_id": 3381917859, + "created_at": "2025-10-08T14:47:18Z", + "repoId": 358618261, + "pullRequestNo": 144 + }, + { + "name": "jaitaiwan", + "id": 674765, + "comment_id": 3539912345, + "created_at": "2025-11-17T04:33:02Z", + "repoId": 358618261, + "pullRequestNo": 145 } ] } \ No newline at end of file diff --git a/static/assets/mutations4.png b/static/assets/mutations4.png new file mode 100644 index 0000000000..f2095ab0de Binary files /dev/null and b/static/assets/mutations4.png differ diff --git a/static/assets/thread_per_consumer0.png b/static/assets/thread_per_consumer0.png new file mode 100644 index 0000000000..911c2f8f86 Binary files /dev/null and b/static/assets/thread_per_consumer0.png differ diff --git a/static/assets/thread_per_consumer1.png b/static/assets/thread_per_consumer1.png new file mode 100644 index 0000000000..7128f1427e Binary files /dev/null and b/static/assets/thread_per_consumer1.png differ diff --git a/static/favicons/android-144x144.png b/static/favicons/android-144x144.png new file mode 100644 index 0000000000..09a2065ec7 Binary files /dev/null and b/static/favicons/android-144x144.png differ diff --git a/static/favicons/android-192x192.png b/static/favicons/android-192x192.png new file mode 100644 index 0000000000..522e4dbce5 Binary files /dev/null and b/static/favicons/android-192x192.png differ diff --git a/static/favicons/android-36x36.png b/static/favicons/android-36x36.png new file mode 100644 index 
0000000000..5e20db791d Binary files /dev/null and b/static/favicons/android-36x36.png differ diff --git a/static/favicons/android-48x48.png b/static/favicons/android-48x48.png new file mode 100644 index 0000000000..0aecd9709c Binary files /dev/null and b/static/favicons/android-48x48.png differ diff --git a/static/favicons/android-72x72.png b/static/favicons/android-72x72.png new file mode 100644 index 0000000000..d762a91100 Binary files /dev/null and b/static/favicons/android-72x72.png differ diff --git a/static/favicons/android-96x96.png b/static/favicons/android-96x96.png new file mode 100644 index 0000000000..09f4e94664 Binary files /dev/null and b/static/favicons/android-96x96.png differ diff --git a/static/favicons/apple-touch-icon-120x120.png b/static/favicons/apple-touch-icon-120x120.png new file mode 100644 index 0000000000..1dc356f19e Binary files /dev/null and b/static/favicons/apple-touch-icon-120x120.png differ diff --git a/static/favicons/apple-touch-icon-144x144.png b/static/favicons/apple-touch-icon-144x144.png new file mode 100644 index 0000000000..09a2065ec7 Binary files /dev/null and b/static/favicons/apple-touch-icon-144x144.png differ diff --git a/static/favicons/apple-touch-icon-152x152.png b/static/favicons/apple-touch-icon-152x152.png new file mode 100644 index 0000000000..a9a85539be Binary files /dev/null and b/static/favicons/apple-touch-icon-152x152.png differ diff --git a/static/favicons/apple-touch-icon-167x167.png b/static/favicons/apple-touch-icon-167x167.png new file mode 100644 index 0000000000..b5004059d9 Binary files /dev/null and b/static/favicons/apple-touch-icon-167x167.png differ diff --git a/static/favicons/apple-touch-icon-60x60.png b/static/favicons/apple-touch-icon-60x60.png new file mode 100644 index 0000000000..3cd12229c1 Binary files /dev/null and b/static/favicons/apple-touch-icon-60x60.png differ diff --git a/static/favicons/apple-touch-icon-76x76.png b/static/favicons/apple-touch-icon-76x76.png new file mode 100644 
index 0000000000..e2fe10957d Binary files /dev/null and b/static/favicons/apple-touch-icon-76x76.png differ diff --git a/static/favicons/apple-touch-icon.png b/static/favicons/apple-touch-icon.png new file mode 100644 index 0000000000..bea2184aa9 Binary files /dev/null and b/static/favicons/apple-touch-icon.png differ diff --git a/static/favicons/coast-228x228.png b/static/favicons/coast-228x228.png new file mode 100644 index 0000000000..62a7c5f481 Binary files /dev/null and b/static/favicons/coast-228x228.png differ diff --git a/static/favicons/favicon-128x128.png b/static/favicons/favicon-128x128.png new file mode 100644 index 0000000000..e7f36c9d5a Binary files /dev/null and b/static/favicons/favicon-128x128.png differ diff --git a/static/favicons/favicon-16x16.png b/static/favicons/favicon-16x16.png new file mode 100644 index 0000000000..029dde3433 Binary files /dev/null and b/static/favicons/favicon-16x16.png differ diff --git a/static/favicons/favicon-256x256.png b/static/favicons/favicon-256x256.png new file mode 100644 index 0000000000..cdefa8ec37 Binary files /dev/null and b/static/favicons/favicon-256x256.png differ diff --git a/static/favicons/favicon-32x32.png b/static/favicons/favicon-32x32.png new file mode 100644 index 0000000000..89a1bca812 Binary files /dev/null and b/static/favicons/favicon-32x32.png differ diff --git a/static/favicons/favicon-48x48.png b/static/favicons/favicon-48x48.png new file mode 100644 index 0000000000..e6d0d4cb66 Binary files /dev/null and b/static/favicons/favicon-48x48.png differ diff --git a/static/favicons/favicon-64x64.png b/static/favicons/favicon-64x64.png new file mode 100644 index 0000000000..a6a2d8a51b Binary files /dev/null and b/static/favicons/favicon-64x64.png differ diff --git a/static/favicons/favicon-72x72.png b/static/favicons/favicon-72x72.png new file mode 100644 index 0000000000..99c0da2691 Binary files /dev/null and b/static/favicons/favicon-72x72.png differ diff --git 
a/static/favicons/favicon-96x96.png b/static/favicons/favicon-96x96.png new file mode 100644 index 0000000000..09f4e94664 Binary files /dev/null and b/static/favicons/favicon-96x96.png differ diff --git a/static/favicons/favicon.ico b/static/favicons/favicon.ico index b72232bd82..f9ad57dfd3 100644 Binary files a/static/favicons/favicon.ico and b/static/favicons/favicon.ico differ diff --git a/static/images/general/altinity-logo_horizontal_blue_white.svg b/static/images/general/altinity-logo_horizontal_blue_white.svg new file mode 100644 index 0000000000..ae405a3bc9 --- /dev/null +++ b/static/images/general/altinity-logo_horizontal_blue_white.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/static/images/general/banners/manage-your-ch-smartly.png b/static/images/general/banners/manage-your-ch-smartly.png new file mode 100644 index 0000000000..0b75aa199e Binary files /dev/null and b/static/images/general/banners/manage-your-ch-smartly.png differ diff --git a/static/images/general/banners/slack-banner-new.png b/static/images/general/banners/slack-banner-new.png new file mode 100644 index 0000000000..a8933a32f8 Binary files /dev/null and b/static/images/general/banners/slack-banner-new.png differ diff --git a/static/images/general/soc.webp b/static/images/general/soc.webp new file mode 100644 index 0000000000..bbecbd8d69 Binary files /dev/null and b/static/images/general/soc.webp differ diff --git a/static/images/general/soc2.webp b/static/images/general/soc2.webp new file mode 100644 index 0000000000..899507cc17 Binary files /dev/null and b/static/images/general/soc2.webp differ diff --git a/static/images/hetzner-logo.svg b/static/images/hetzner-logo.svg new file mode 100644 index 0000000000..f5812ee247 --- /dev/null +++ b/static/images/hetzner-logo.svg @@ -0,0 +1 @@ +Element 1 \ No newline at end of file