From 33b934220d108dd32ee76a0e1a0fd6ddeb5dac82 Mon Sep 17 00:00:00 2001
From: Igor Lukanin
Date: Thu, 5 Sep 2024 11:12:55 +0200
Subject: [PATCH 001/415] docs: Fix percent calculations
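The docs previously divided the two counts first and multiplied by 100.0
afterwards; with integer-typed counts, SQL dialects such as Postgres truncate
that division to zero. As a minimal illustration (member names as in the docs
below), the corrected measure multiplies by 100.0 first to force
floating-point arithmetic, keeps NULLIF as the division-by-zero guard, and
references members through the explicit `{CUBE.}` prefix:

```yaml
measures:
  - name: completed_percentage
    type: number
    # 100.0 * ... promotes the integer counts to floating point before the
    # division; NULLIF yields NULL instead of erroring when count is 0.
    sql: "(100.0 * {CUBE.completed_count} / NULLIF({CUBE.count}, 0))"
    format: percent
```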
---
docs/pages/product/apis-integrations/sql-api/query-format.mdx | 2 +-
.../pages/product/getting-started/cloud/create-data-model.mdx | 4 ++--
.../product/getting-started/databricks/create-data-model.mdx | 4 ++--
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/docs/pages/product/apis-integrations/sql-api/query-format.mdx b/docs/pages/product/apis-integrations/sql-api/query-format.mdx
index 14469daaa0450..ee22eba7afc18 100644
--- a/docs/pages/product/apis-integrations/sql-api/query-format.mdx
+++ b/docs/pages/product/apis-integrations/sql-api/query-format.mdx
@@ -284,7 +284,7 @@ cubes:
- name: completed_percentage
type: number
- sql: "({completed_count} / NULLIF({count}, 0)) * 100.0"
+ sql: "(100.0 * {CUBE.completed_count} / NULLIF({CUBE.count}, 0))"
format: percent
```
diff --git a/docs/pages/product/getting-started/cloud/create-data-model.mdx b/docs/pages/product/getting-started/cloud/create-data-model.mdx
index 440b1a0a29c4a..ea070e534e3fd 100644
--- a/docs/pages/product/getting-started/cloud/create-data-model.mdx
+++ b/docs/pages/product/getting-started/cloud/create-data-model.mdx
@@ -112,7 +112,7 @@ within the `measures` block.
```yaml
- name: completed_percentage
type: number
- sql: "({completed_count} / NULLIF({count}, 0)) * 100.0"
+ sql: "(100.0 * {CUBE.completed_count} / NULLIF({CUBE.count}, 0))"
format: percent
```
@@ -159,7 +159,7 @@ cubes:
- name: completed_percentage
type: number
- sql: "({completed_count} / NULLIF({count}, 0)) * 100.0"
+ sql: "(100.0 * {CUBE.completed_count} / NULLIF({CUBE.count}, 0))"
format: percent
```
diff --git a/docs/pages/product/getting-started/databricks/create-data-model.mdx b/docs/pages/product/getting-started/databricks/create-data-model.mdx
index 502dd0b076592..8c89d9da9d44d 100644
--- a/docs/pages/product/getting-started/databricks/create-data-model.mdx
+++ b/docs/pages/product/getting-started/databricks/create-data-model.mdx
@@ -107,7 +107,7 @@ within the `measures` block.
```yaml
- name: completed_percentage
type: number
- sql: "({completed_count} / NULLIF({count}, 0)) * 100.0"
+ sql: "(100.0 * {CUBE.completed_count} / NULLIF({CUBE.count}, 0))"
format: percent
```
@@ -154,7 +154,7 @@ cubes:
- name: completed_percentage
type: number
- sql: "({completed_count} / NULLIF({count}, 0)) * 100.0"
+ sql: "(100.0 * {CUBE.completed_count} / NULLIF({CUBE.count}, 0))"
format: percent
```
From c0466fde9b7a3834159d7ec592362edcab6d9795 Mon Sep 17 00:00:00 2001
From: Konstantin Burkalev
Date: Thu, 5 Sep 2024 22:17:48 +0300
Subject: [PATCH 002/415] fix(schema-compiler): propagate FILTER_PARAMS from
view to inner cube's SELECT (#8466)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* chore(schema-compiler): Move backAliasMembers*() from PreAggregations to BaseQuery
* chore(schema-compiler): remove Ramda from BaseGroupFilter
* chore(schema-compiler): improve getSqlGenerator() by removing duplicate calls to getDbType()
* chore(schema-compiler): improvement in extractFilterMembers
* chore(schema-compiler): Improvement in evaluateSymbolSql()
* fix(schema-compiler): For views, propagate FILTER_PARAMS to inner cube SELECT
* chore(schema-compiler): add tests for FILTER_PARAMS propagation
* chore(schema-compiler): fix yaml-compiler tests
* fix(schema-compiler): fix backalias resolution in FILTER_PARAMS to exclude segments
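For illustration, a minimal sketch of the newly supported flow (cube and view
names are hypothetical, mirroring the unit test added below): a cube
references FILTER_PARAMS in its `sql`, and a view exposes that cube's members.
A filter applied through the view is now pushed down into the cube's SELECT
instead of degrading to `1 = 1`:

```yaml
cubes:
  - name: orders
    # The placeholder renders the matching WHERE predicate so the database
    # can prune rows before Cube aggregates them.
    sql: "SELECT * FROM public.orders WHERE {FILTER_PARAMS.orders.type.filter('type')}"
    measures:
      - name: count
        type: count
    dimensions:
      - name: type
        sql: type
        type: string

views:
  - name: orders_view
    cubes:
      - join_path: orders
        prefix: true
        includes: [type, count]
```

Querying `orders_view.orders_count` with an equality filter on
`orders_view.orders_type` now renders that predicate inside the cube's
FILTER_PARAMS placeholder, as the new `propagate filter params from view into
cube's query` test asserts.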
---
.../src/adapter/BaseGroupFilter.ts | 4 +-
.../src/adapter/BaseQuery.js | 128 ++++++++++++++----
.../src/adapter/PreAggregations.js | 41 +-----
.../postgres/yaml-compiler.test.ts | 67 +++++----
.../test/unit/base-query.test.ts | 56 ++++++--
.../src/core/CompilerApi.js | 11 +-
6 files changed, 193 insertions(+), 114 deletions(-)
diff --git a/packages/cubejs-schema-compiler/src/adapter/BaseGroupFilter.ts b/packages/cubejs-schema-compiler/src/adapter/BaseGroupFilter.ts
index 9dd2bc613a8a4..49f17519739aa 100644
--- a/packages/cubejs-schema-compiler/src/adapter/BaseGroupFilter.ts
+++ b/packages/cubejs-schema-compiler/src/adapter/BaseGroupFilter.ts
@@ -1,5 +1,3 @@
-import R from 'ramda';
-
export class BaseGroupFilter {
protected readonly values: any;
@@ -31,7 +29,7 @@ export class BaseGroupFilter {
return null;
}
return `(${sql})`;
- }).filter(R.identity).join(` ${this.operator.toUpperCase()} `);
+ }).filter(x => x).join(` ${this.operator.toUpperCase()} `);
if (!r.length) {
return null;
diff --git a/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js b/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js
index affda97b5fece..07ad2fdc528ff 100644
--- a/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js
+++ b/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js
@@ -2180,15 +2180,16 @@ export class BaseQuery {
const memberPathArray = [cubeName, name];
const memberPath = this.cubeEvaluator.pathFromArray(memberPathArray);
let type = memberExpressionType;
- if (!type && this.cubeEvaluator.isMeasure(memberPathArray)) {
- type = 'measure';
- }
- if (!type && this.cubeEvaluator.isDimension(memberPathArray)) {
- type = 'dimension';
- }
- if (!type && this.cubeEvaluator.isSegment(memberPathArray)) {
- type = 'segment';
+ if (!type) {
+ if (this.cubeEvaluator.isMeasure(memberPathArray)) {
+ type = 'measure';
+ } else if (this.cubeEvaluator.isDimension(memberPathArray)) {
+ type = 'dimension';
+ } else if (this.cubeEvaluator.isSegment(memberPathArray)) {
+ type = 'segment';
+ }
}
+
const parentMember = this.safeEvaluateSymbolContext().currentMember;
if (this.safeEvaluateSymbolContext().memberChildren && parentMember) {
this.safeEvaluateSymbolContext().memberChildren[parentMember] = this.safeEvaluateSymbolContext().memberChildren[parentMember] || [];
@@ -2358,7 +2359,7 @@ export class BaseQuery {
/**
* Evaluate escaped SQL-alias for cube or cube's property
- * (measure, dimention).
+ * (measure, dimension).
* @param {string} cubeName
* @returns string
*/
@@ -3529,25 +3530,29 @@ export class BaseQuery {
static extractFilterMembers(filter) {
if (filter.operator === 'and' || filter.operator === 'or') {
return filter.values.map(f => BaseQuery.extractFilterMembers(f)).reduce((a, b) => ((a && b) ? { ...a, ...b } : null), {});
- } else if (filter.measure || filter.dimension) {
+ } else if (filter.measure) {
+ return {
+ [filter.measure]: true
+ };
+ } else if (filter.dimension) {
return {
- [filter.measure || filter.dimension]: true
+ [filter.dimension]: true
};
} else {
return null;
}
}
- static findAndSubTreeForFilterGroup(filter, groupMembers, newGroupFilter) {
+ static findAndSubTreeForFilterGroup(filter, groupMembers, newGroupFilter, aliases) {
if ((filter.operator === 'and' || filter.operator === 'or') && !filter.values?.length) {
return null;
}
const filterMembers = BaseQuery.extractFilterMembers(filter);
- if (filterMembers && Object.keys(filterMembers).every(m => groupMembers.indexOf(m) !== -1)) {
+ if (filterMembers && Object.keys(filterMembers).every(m => (groupMembers.indexOf(m) !== -1 || aliases.indexOf(m) !== -1))) {
return filter;
}
if (filter.operator === 'and') {
- const result = filter.values.map(f => BaseQuery.findAndSubTreeForFilterGroup(f, groupMembers, newGroupFilter)).filter(f => !!f);
+ const result = filter.values.map(f => BaseQuery.findAndSubTreeForFilterGroup(f, groupMembers, newGroupFilter, aliases)).filter(f => !!f);
if (!result.length) {
return null;
}
@@ -3572,21 +3577,30 @@ export class BaseQuery {
);
}
- static renderFilterParams(filter, filterParamArgs, allocateParam, newGroupFilter) {
+ static renderFilterParams(filter, filterParamArgs, allocateParam, newGroupFilter, aliases) {
if (!filter) {
return '1 = 1';
}
if (filter.operator === 'and' || filter.operator === 'or') {
const values = filter.values
- .map(f => BaseQuery.renderFilterParams(f, filterParamArgs, allocateParam, newGroupFilter))
+ .map(f => BaseQuery.renderFilterParams(f, filterParamArgs, allocateParam, newGroupFilter, aliases))
.map(v => ({ filterToWhere: () => v }));
return newGroupFilter({ operator: filter.operator, values }).filterToWhere();
}
- const filterParams = filter && filter.filterParams();
- const filterParamArg = filterParamArgs.filter(p => p.__member() === filter.measure || p.__member() === filter.dimension)[0];
+ const filterParams = filter.filterParams();
+ const filterParamArg = filterParamArgs.filter(p => {
+ const member = p.__member();
+ return member === filter.measure ||
+ member === filter.dimension ||
+ (aliases[member] && (
+ aliases[member] === filter.measure ||
+ aliases[member] === filter.dimension
+ ));
+ })[0];
+
if (!filterParamArg) {
throw new Error(`FILTER_PARAMS arg not found for ${filter.measure || filter.dimension}`);
}
@@ -3619,15 +3633,25 @@ export class BaseQuery {
return f.__member();
});
- const filter = BaseQuery.findAndSubTreeForFilterGroup(newGroupFilter({ operator: 'and', values: allFilters }), groupMembers, newGroupFilter);
+ const aliases = allFilters ?
+ allFilters
+ .map(v => (v.query ? v.query.allBackAliasMembersExceptSegments() : {}))
+ .reduce((a, b) => ({ ...a, ...b }), {})
+ : {};
+ const filter = BaseQuery.findAndSubTreeForFilterGroup(
+ newGroupFilter({ operator: 'and', values: allFilters }),
+ groupMembers,
+ newGroupFilter,
+ Object.values(aliases)
+ );
- return `(${BaseQuery.renderFilterParams(filter, filterParamArgs, allocateParam, newGroupFilter)})`;
+ return `(${BaseQuery.renderFilterParams(filter, filterParamArgs, allocateParam, newGroupFilter, aliases)})`;
};
}
static filterProxyFromAllFilters(allFilters, cubeEvaluator, allocateParam, newGroupFilter) {
return new Proxy({}, {
- get: (target, name) => {
+ get: (_target, name) => {
if (name === '_objectWithResolvedProperties') {
return true;
}
@@ -3644,12 +3668,28 @@ export class BaseQuery {
return cubeEvaluator.pathFromArray([cubeNameObj.cube, propertyName]);
},
toString() {
+ // Segments should be excluded because they are evaluated separately in cubeReferenceProxy
+ // Otherwise this falls into a recursive loop (stack overflow) caused by:
+ // collectFrom() -> traverseSymbol() -> evaluateSymbolSql() ->
+ // evaluateSql() -> resolveSymbolsCall() -> cubeReferenceProxy->toString() ->
+ // evaluateSymbolSql() -> evaluateSql() -> ... and back here again
+ const aliases = allFilters ?
+ allFilters
+ .map(v => (v.query ? v.query.allBackAliasMembersExceptSegments() : {}))
+ .reduce((a, b) => ({ ...a, ...b }), {})
+ : {};
+ // Keep only the aliases that relate to this cube (matched by key or value prefix)
+ const filteredAliases = Object.entries(aliases)
+ .filter(([key, value]) => key.startsWith(cubeNameObj.cube) || value.startsWith(cubeNameObj.cube))
+ .reduce((acc, [key, value]) => ({ ...acc, [key]: value }), {});
const filter = BaseQuery.findAndSubTreeForFilterGroup(
newGroupFilter({ operator: 'and', values: allFilters }),
[cubeEvaluator.pathFromArray([cubeNameObj.cube, propertyName])],
- newGroupFilter
+ newGroupFilter,
+ Object.values(filteredAliases)
);
- return `(${BaseQuery.renderFilterParams(filter, [this], allocateParam, newGroupFilter)})`;
+
+ return `(${BaseQuery.renderFilterParams(filter, [this], allocateParam, newGroupFilter, aliases)})`;
}
})
})
@@ -3657,4 +3697,46 @@ export class BaseQuery {
}
});
}
+
+ flattenAllMembers(excludeSegments = false) {
+ return R.flatten(
+ this.measures
+ .concat(this.dimensions)
+ .concat(excludeSegments ? [] : this.segments)
+ .concat(this.filters)
+ .concat(this.measureFilters)
+ .concat(this.timeDimensions)
+ .map(m => m.getMembers()),
+ );
+ }
+
+ allBackAliasMembersExceptSegments() {
+ return this.backAliasMembers(this.flattenAllMembers(true));
+ }
+
+ allBackAliasMembers() {
+ return this.backAliasMembers(this.flattenAllMembers());
+ }
+
+ backAliasMembers(members) {
+ const query = this;
+ return members.map(
+ member => {
+ const collectedMembers = query
+ .collectFrom([member], query.collectMemberNamesFor.bind(query), 'collectMemberNamesFor');
+ const memberPath = member.expressionPath();
+ let nonAliasSeen = false;
+ return collectedMembers
+ .filter(d => {
+ if (!query.cubeEvaluator.byPathAnyType(d).aliasMember) {
+ nonAliasSeen = true;
+ }
+ return !nonAliasSeen;
+ })
+ .map(d => (
+ { [query.cubeEvaluator.byPathAnyType(d).aliasMember]: memberPath }
+ )).reduce((a, b) => ({ ...a, ...b }), {});
+ }
+ ).reduce((a, b) => ({ ...a, ...b }), {});
+ }
}
diff --git a/packages/cubejs-schema-compiler/src/adapter/PreAggregations.js b/packages/cubejs-schema-compiler/src/adapter/PreAggregations.js
index bded2c1e0365d..9edc16c70ca53 100644
--- a/packages/cubejs-schema-compiler/src/adapter/PreAggregations.js
+++ b/packages/cubejs-schema-compiler/src/adapter/PreAggregations.js
@@ -157,7 +157,7 @@ export class PreAggregations {
const queryForSqlEvaluation = this.query.preAggregationQueryForSqlEvaluation(cube, preAggregation);
const partitionInvalidateKeyQueries = queryForSqlEvaluation.partitionInvalidateKeyQueries && queryForSqlEvaluation.partitionInvalidateKeyQueries(cube, preAggregation);
- const allBackAliasMembers = PreAggregations.allBackAliasMembers(this.query);
+ const allBackAliasMembers = this.query.allBackAliasMembers();
const matchedTimeDimension = preAggregation.partitionGranularity && !this.hasCumulativeMeasures &&
this.query.timeDimensions.find(td => {
@@ -292,7 +292,7 @@ export class PreAggregations {
static transformQueryToCanUseForm(query) {
const flattenDimensionMembers = this.flattenDimensionMembers(query);
const sortedDimensions = this.squashDimensions(query, flattenDimensionMembers);
- const allBackAliasMembers = this.allBackAliasMembers(query);
+ const allBackAliasMembers = query.allBackAliasMembers();
const measures = query.measures.concat(query.measureFilters);
const measurePaths = R.uniq(this.flattenMembers(measures).map(m => m.expressionPath()));
const collectLeafMeasures = query.collectLeafMeasures.bind(query);
@@ -426,31 +426,6 @@ export class PreAggregations {
);
}
- static backAliasMembers(query, members) {
- return members.map(
- member => {
- const collectedMembers = query
- .collectFrom([member], query.collectMemberNamesFor.bind(query), 'collectMemberNamesFor');
- const memberPath = member.expressionPath();
- let nonAliasSeen = false;
- return collectedMembers
- .filter(d => {
- if (!query.cubeEvaluator.byPathAnyType(d).aliasMember) {
- nonAliasSeen = true;
- }
- return !nonAliasSeen;
- })
- .map(d => (
- { [query.cubeEvaluator.byPathAnyType(d).aliasMember]: memberPath }
- )).reduce((a, b) => ({ ...a, ...b }), {});
- }
- ).reduce((a, b) => ({ ...a, ...b }), {});
- }
-
- static allBackAliasMembers(query) {
- return this.backAliasMembers(query, this.flattenAllMembers(query));
- }
-
static sortTimeDimensionsWithRollupGranularity(timeDimensions) {
return timeDimensions && R.sortBy(
R.prop(0),
@@ -750,18 +725,6 @@ export class PreAggregations {
);
}
- static flattenAllMembers(query) {
- return R.flatten(
- query.measures
- .concat(query.dimensions)
- .concat(query.segments)
- .concat(query.filters)
- .concat(query.measureFilters)
- .concat(query.timeDimensions)
- .map(m => m.getMembers()),
- );
- }
-
// eslint-disable-next-line no-unused-vars
// eslint-disable-next-line @typescript-eslint/no-unused-vars
getCubeLattice(cube, preAggregationName, preAggregation) {
diff --git a/packages/cubejs-schema-compiler/test/integration/postgres/yaml-compiler.test.ts b/packages/cubejs-schema-compiler/test/integration/postgres/yaml-compiler.test.ts
index 74fd630b1379f..f3d67dc166574 100644
--- a/packages/cubejs-schema-compiler/test/integration/postgres/yaml-compiler.test.ts
+++ b/packages/cubejs-schema-compiler/test/integration/postgres/yaml-compiler.test.ts
@@ -10,7 +10,7 @@ describe('YAMLCompiler', () => {
cubes:
- name: ActiveUsers
sql: "SELECT 1 as user_id, '2022-01-01' as timestamp"
-
+
measures:
- name: weeklyActive
sql: "{CUBE}.user_id"
@@ -62,7 +62,7 @@ cubes:
cubes:
- name: ActiveUsers
sql: "SELECT 1 as user_id, '2022-01-01' as timestamp"
-
+
measures:
- name: weeklyActive
sql: "{CUBE}.user_id"
@@ -83,7 +83,7 @@ cubes:
cubes:
- name: ActiveUsers
sql: "SELECT 1 as user_id, '2022-01-01'::timestamptz as timestamp"
-
+
measures:
- name: withFilter
sql: "{CUBE}.user_id"
@@ -126,7 +126,7 @@ cubes:
cubes:
- name: ActiveUsers
sql: "SELECT 1 as user_id, '2022-01-01' as timestamp"
-
+
measures:
- name: weeklyActive
sql: "{user_id}"
@@ -181,7 +181,7 @@ cubes:
cubes:
- name: ActiveUsers
sql: "SELECT 1 as user_id, '2022-01-01' as timestamp"
-
+
measures:
- name: weeklyActive
sql: "{CUBE.user_id}"
@@ -197,7 +197,7 @@ cubes:
- name: time
sql: "{CUBE}.timestamp"
type: time
-
+
preAggregations:
- name: main
measures:
@@ -248,7 +248,7 @@ cubes:
cubes:
- name: active_users
sql: "SELECT * FROM (SELECT 1 as user_id, '2022-01-01'::timestamptz as \\"timestamp\\") t WHERE {FILTER_PARAMS.active_users.time.filter(\\"timestamp\\")} AND {FILTER_PARAMS.active_users.time.filter(lambda a,b : f'timestamp >= {a}::timestamptz AND timestamp <= {b}::timestamptz')}"
-
+
measures:
- name: weekly_active
sql: "{CUBE.user_id}"
@@ -303,13 +303,20 @@ cubes:
const { compiler, joinGraph, cubeEvaluator } = prepareYamlCompiler(`
cubes:
- name: orders
- sql: "SELECT 1 as id, 1 as customer_id, TO_TIMESTAMP('2022-01-01', 'YYYY-MM-DD') as timestamp WHERE {FILTER_PARAMS.orders.time.filter(\\"timestamp\\")}"
-
+ sql: "SELECT *
+ FROM (
+ SELECT
+ 1 as id,
+ 1 as customer_id,
+ TO_TIMESTAMP('2022-01-01', 'YYYY-MM-DD') as timestamp
+ )
+ WHERE {FILTER_PARAMS.orders.time.filter(\\"timestamp\\")}"
+
joins:
- name: customers
sql: "{CUBE}.customer_id = {customers}.id"
relationship: many_to_one
-
+
measures:
- name: count
type: count
@@ -319,11 +326,11 @@ cubes:
sql: "{CUBE}.id"
type: string
primary_key: true
-
+
- name: time
sql: "{CUBE}.timestamp"
type: time
-
+
preAggregations:
- name: main
measures: [orders.count]
@@ -356,11 +363,11 @@ cubes:
measures:
- name: count
type: count
-
-
+
+
- name: customers
sql: "SELECT 1 as id, 'Foo' as name"
-
+
measures:
- name: count
type: count
@@ -370,11 +377,11 @@ cubes:
sql: id
type: string
primary_key: true
-
+
- name: name
sql: "{CUBE}.name"
type: string
-
+
views:
- name: line_items_view
@@ -385,13 +392,13 @@ views:
- join_path: line_items.orders
prefix: true
includes: "*"
- excludes:
+ excludes:
- count
-
+
- join_path: line_items.orders.customers
alias: aliased_customers
prefix: true
- includes:
+ includes:
- name: name
alias: full_name
`);
@@ -425,12 +432,12 @@ views:
cubes:
- name: BaseUsers
sql: "SELECT 1"
-
+
dimensions:
- name: time
sql: "{CUBE}.timestamp"
type: time
-
+
- name: ActiveUsers
sql: "SELECT 1 as user_id, '2022-01-01' as timestamp"
extends: BaseUsers
@@ -527,9 +534,9 @@ cubes:
type: string
sql: w_id
primary_key: true
-
+
joins:
-
+
- name: Z
sql: "{CUBE}.z_id = {Z}.z_id"
relationship: many_to_one
@@ -550,9 +557,9 @@ cubes:
type: string
sql: m_id
primary_key: true
-
+
joins:
-
+
- name: V
sql: "{CUBE}.v_id = {V}.v_id"
relationship: many_to_one
@@ -560,11 +567,11 @@ cubes:
- name: W
sql: "{CUBE}.w_id = {W}.w_id"
relationship: many_to_one
-
+
- name: Z
sql: >
SELECT 1 as z_id, 'US' as COUNTRY
-
+
dimensions:
- name: country
sql: "{CUBE}.COUNTRY"
@@ -574,7 +581,7 @@ cubes:
sql: "{CUBE}.z_id"
type: string
primaryKey: true
-
+
- name: V
sql: |
SELECT 1 as v_id, 1 as z_id
@@ -595,7 +602,7 @@ cubes:
views:
- name: m_view
-
+
cubes:
- join_path: M
diff --git a/packages/cubejs-schema-compiler/test/unit/base-query.test.ts b/packages/cubejs-schema-compiler/test/unit/base-query.test.ts
index cf879dd21481c..8e74bb852cda3 100644
--- a/packages/cubejs-schema-compiler/test/unit/base-query.test.ts
+++ b/packages/cubejs-schema-compiler/test/unit/base-query.test.ts
@@ -688,21 +688,30 @@ describe('SQL Generation', () => {
/** @type {Compilers} */
const compilers = prepareYamlCompiler(
createSchemaYaml({
- cubes: [
- {
- name: 'Order',
- sql: 'select * from order where {FILTER_PARAMS.Order.type.filter(\'type\')}',
- measures: [{
- name: 'count',
- type: 'count',
- }],
- dimensions: [{
- name: 'type',
- sql: 'type',
- type: 'string'
- }]
- },
- ]
+ cubes: [{
+ name: 'Order',
+ sql: 'select * from order where {FILTER_PARAMS.Order.type.filter(\'type\')}',
+ measures: [{
+ name: 'count',
+ type: 'count',
+ }],
+ dimensions: [{
+ name: 'type',
+ sql: 'type',
+ type: 'string'
+ }]
+ }],
+ views: [{
+ name: 'orders_view',
+ cubes: [{
+ join_path: 'Order',
+ prefix: true,
+ includes: [
+ 'type',
+ 'count',
+ ]
+ }]
+ }]
})
);
@@ -857,6 +866,23 @@ describe('SQL Generation', () => {
const cubeSQL = query.cubeSql('Order');
expect(cubeSQL).toMatch(/\(\s*\(.*type\s*=\s*\$\d\$.*OR.*type\s*=\s*\$\d\$.*\)\s*AND\s*\(.*type\s*=\s*\$\d\$.*OR.*type\s*=\s*\$\d\$.*\)\s*\)/);
});
+
+ it('propagate filter params from view into cube\'s query', async () => {
+ await compilers.compiler.compile();
+ const query = new BaseQuery(compilers, {
+ measures: ['orders_view.Order_count'],
+ filters: [
+ {
+ member: 'orders_view.Order_type',
+ operator: 'equals',
+ values: ['online'],
+ },
+ ],
+ });
+ const cubeSQL = query.cubeSql('Order');
+ console.log('TEST: ', cubeSQL);
+ expect(cubeSQL).toContain('select * from order where ((type = $0$))');
+ });
});
describe('FILTER_GROUP', () => {
diff --git a/packages/cubejs-server-core/src/core/CompilerApi.js b/packages/cubejs-server-core/src/core/CompilerApi.js
index 489c1816d6cb5..401e50ac5d46c 100644
--- a/packages/cubejs-server-core/src/core/CompilerApi.js
+++ b/packages/cubejs-server-core/src/core/CompilerApi.js
@@ -129,7 +129,7 @@ export class CompilerApi {
async getSqlGenerator(query, dataSource) {
const dbType = await this.getDbType(dataSource);
const compilers = await this.getCompilers({ requestId: query.requestId });
- let sqlGenerator = await this.createQueryByDataSource(compilers, query, dataSource);
+ let sqlGenerator = await this.createQueryByDataSource(compilers, query, dataSource, dbType);
if (!sqlGenerator) {
throw new Error(`Unknown dbType: ${dbType}`);
@@ -142,7 +142,8 @@ export class CompilerApi {
sqlGenerator = await this.createQueryByDataSource(
compilers,
query,
- dataSource
+ dataSource,
+ _dbType
);
if (!sqlGenerator) {
@@ -203,8 +204,10 @@ export class CompilerApi {
return cubeEvaluator.scheduledPreAggregations();
}
- async createQueryByDataSource(compilers, query, dataSource) {
- const dbType = await this.getDbType(dataSource);
+ async createQueryByDataSource(compilers, query, dataSource, dbType) {
+ if (!dbType) {
+ dbType = await this.getDbType(dataSource);
+ }
return this.createQuery(compilers, dbType, this.getDialectClass(dataSource, dbType), query);
}
From 7d537c76ab8aa1a0cae98fd7c829a6ea6863aa44 Mon Sep 17 00:00:00 2001
From: Konstantin Burkalev
Date: Fri, 6 Sep 2024 01:48:48 +0300
Subject: [PATCH 003/415] chore(schema-compiler): fix yaml compiler integration
test (#8674)
---
.../test/integration/postgres/yaml-compiler.test.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/packages/cubejs-schema-compiler/test/integration/postgres/yaml-compiler.test.ts b/packages/cubejs-schema-compiler/test/integration/postgres/yaml-compiler.test.ts
index f3d67dc166574..bd829ce6ff840 100644
--- a/packages/cubejs-schema-compiler/test/integration/postgres/yaml-compiler.test.ts
+++ b/packages/cubejs-schema-compiler/test/integration/postgres/yaml-compiler.test.ts
@@ -309,7 +309,7 @@ cubes:
1 as id,
1 as customer_id,
TO_TIMESTAMP('2022-01-01', 'YYYY-MM-DD') as timestamp
- )
+ ) sq
WHERE {FILTER_PARAMS.orders.time.filter(\\"timestamp\\")}"
joins:
From ae17e5b95764742217ba9f53412a75b0cd21ded2 Mon Sep 17 00:00:00 2001
From: Igor Lukanin
Date: Fri, 6 Sep 2024 12:21:43 +0200
Subject: [PATCH 004/415] docs: Tiny edits
---
docs/pages/guides/style-guide.mdx | 3 +--
docs/pages/product/data-modeling/syntax.mdx | 12 +++++++++---
2 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/docs/pages/guides/style-guide.mdx b/docs/pages/guides/style-guide.mdx
index aeb5886c0f995..69ddbd0a6f12b 100644
--- a/docs/pages/guides/style-guide.mdx
+++ b/docs/pages/guides/style-guide.mdx
@@ -19,8 +19,7 @@ This style guide is intended to be used by:
## Syntax
-- Default to [YAML syntax][ref-syntax-model] for data modeling. Use JavaScript
- syntax for dynamic data models only.
+- Default to [YAML syntax][ref-syntax-model] for data modeling.
- Use [snake case][ref-syntax-naming] when using either YAML or JavaScript
syntax.
- Follow the recommendations on [YAML syntax][self-yaml] and [SQL
diff --git a/docs/pages/product/data-modeling/syntax.mdx b/docs/pages/product/data-modeling/syntax.mdx
index 65f3fa91eabaa..ffffadbb46487 100644
--- a/docs/pages/product/data-modeling/syntax.mdx
+++ b/docs/pages/product/data-modeling/syntax.mdx
@@ -37,7 +37,8 @@ model
Cube supports two ways to define data model files: with [YAML][wiki-yaml] or
JavaScript syntax. YAML data model files should have the `.yml` extension,
-whereas JavaScript data model files should end with `.js`.
+whereas JavaScript data model files should end with `.js`. You can mix YAML and
+JavaScript files within a single data model.
@@ -63,11 +64,15 @@ cubes:
-You define the data model statically or build [dynamic data
+You can define the data model statically or build [dynamic data
models][ref-dynamic-data-models] programmatically. YAML data models use
[Jinja and Python][ref-dynamic-data-models-jinja] whereas JavaScript data
models use [JavaScript][ref-dynamic-data-models-js].
+It is [recommended][ref-style-guide] to default to YAML syntax because of its
+simplicity and readability. However, JavaScript might provide more flexibility
+for dynamic data modeling.
+
## Naming
Common rules apply to names of entities within the data model. All names must:
@@ -586,4 +591,5 @@ defining dynamic data models.
[wiki-yaml]: https://en.wikipedia.org/wiki/YAML
[link-snowflake-listagg]: https://docs.snowflake.com/en/sql-reference/functions/listagg
[link-bigquery-stringagg]: https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators#string_agg
-[link-sql-udf]: https://en.wikipedia.org/wiki/User-defined_function#Databases
\ No newline at end of file
+[link-sql-udf]: https://en.wikipedia.org/wiki/User-defined_function#Databases
+[ref-style-guide]: /guides/style-guide
\ No newline at end of file
From 1ce30a46536a4157ffa3d136e3cdce9e72d1af23 Mon Sep 17 00:00:00 2001
From: Igor Lukanin
Date: Fri, 6 Sep 2024 12:36:10 +0200
Subject: [PATCH 005/415] docs: Add Cube Rollup London banners
---
README.md | 7 +++++++
docs/theme.config.tsx | 10 ++++++++++
2 files changed, 17 insertions(+)
diff --git a/README.md b/README.md
index 66cccc49fdf35..c10009515cbcf 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,13 @@
+
+
+
+
+
+
+
[Website](https://cube.dev?ref=github-readme) • [Getting Started](https://cube.dev/docs/getting-started?ref=github-readme) • [Docs](https://cube.dev/docs?ref=github-readme) • [Examples](https://cube.dev/docs/examples?ref=github-readme) • [Blog](https://cube.dev/blog?ref=github-readme) • [Slack](https://slack.cube.dev?ref=github-readme) • [Twitter](https://twitter.com/the_cube_dev)
[](https://badge.fury.io/js/%40cubejs-backend%2Fserver)
diff --git a/docs/theme.config.tsx b/docs/theme.config.tsx
index 837ca242629e2..c0b9ff483de00 100644
--- a/docs/theme.config.tsx
+++ b/docs/theme.config.tsx
@@ -106,6 +106,16 @@ const config: DocsThemeConfig = {
)
+ },
+ toc: {
+ extraContent: (
+
+ )
}
};
From acbdcd2f9ef2b458b8816f3ecc8899a99210cf09 Mon Sep 17 00:00:00 2001
From: Mikhail Cheshkov
Date: Fri, 6 Sep 2024 19:05:37 +0300
Subject: [PATCH 006/415] refactor(cubesql): Use LazyLock instead of
lazy_static, move more regexps to statics (#8675)
* Replace lazy_static with LazyLock in many places
* Replace lazy_static with Once for testing logger init
* Remove unused testing logging static in config
* Move more static regexps to LazyLocks
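As a minimal sketch of the refactor (the DATE_PART_REGEX static is taken from
wrapper.rs below; the surrounding scaffolding is illustrative):

```rust
use std::sync::LazyLock;

use regex::Regex;

// Before: the lazy_static crate and its macro were required.
//
//     lazy_static! {
//         static ref DATE_PART_REGEX: Regex =
//             Regex::new("^[A-Za-z_ ]+$").unwrap();
//     }

// After: std::sync::LazyLock (stable since Rust 1.80) runs the closure on
// first access and caches the result, with no external dependency.
static DATE_PART_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new("^[A-Za-z_ ]+$").unwrap());

fn main() {
    // The first dereference compiles the regex; later uses reuse it.
    assert!(DATE_PART_REGEX.is_match("day"));
    assert!(!DATE_PART_REGEX.is_match("5 minutes"));
}
```

LazyLock also lets a static live inside the function that uses it, which is
how several hunks below scope REGEX, NON_ID_REGEX, and the Sigma workaround
regexes.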
---
packages/cubejs-backend-native/Cargo.lock | 1 -
rust/cubenativeutils/Cargo.lock | 2 -
rust/cubesql/Cargo.lock | 1 -
rust/cubesql/cubesql/Cargo.toml | 1 -
.../cubesql/src/compile/engine/df/wrapper.rs | 29 +++++++-----
.../cubesql/src/compile/engine/udf/common.rs | 11 +++--
rust/cubesql/cubesql/src/compile/parser.rs | 21 +++++----
.../cubesql/src/compile/rewrite/converter.rs | 10 ++--
.../src/compile/rewrite/rules/members.rs | 47 ++++++++++---------
rust/cubesql/cubesql/src/compile/test/mod.rs | 11 ++---
rust/cubesql/cubesql/src/config/mod.rs | 5 --
rust/cubesql/cubesql/src/lib.rs | 2 -
rust/cubesql/cubesql/src/sql/session.rs | 12 ++---
rust/cubesql/cubesql/src/telemetry/mod.rs | 14 +++---
rust/cubesqlplanner/Cargo.lock | 2 -
15 files changed, 85 insertions(+), 84 deletions(-)
diff --git a/packages/cubejs-backend-native/Cargo.lock b/packages/cubejs-backend-native/Cargo.lock
index d6c4375c21deb..039ab5a8b2430 100644
--- a/packages/cubejs-backend-native/Cargo.lock
+++ b/packages/cubejs-backend-native/Cargo.lock
@@ -788,7 +788,6 @@ dependencies = [
"futures-util",
"hashbrown 0.14.3",
"itertools",
- "lazy_static",
"log",
"lru",
"minijinja",
diff --git a/rust/cubenativeutils/Cargo.lock b/rust/cubenativeutils/Cargo.lock
index 8a7ca561e9dbd..5dc499c1c36fb 100644
--- a/rust/cubenativeutils/Cargo.lock
+++ b/rust/cubenativeutils/Cargo.lock
@@ -706,7 +706,6 @@ dependencies = [
"futures-util",
"hashbrown 0.14.5",
"itertools",
- "lazy_static",
"log",
"lru",
"minijinja",
@@ -718,7 +717,6 @@ dependencies = [
"regex",
"rust_decimal",
"serde",
- "serde_derive",
"serde_json",
"sha1_smol",
"sha2",
diff --git a/rust/cubesql/Cargo.lock b/rust/cubesql/Cargo.lock
index 447302de31405..2b7ebfb538384 100644
--- a/rust/cubesql/Cargo.lock
+++ b/rust/cubesql/Cargo.lock
@@ -775,7 +775,6 @@ dependencies = [
"hashbrown 0.14.3",
"insta",
"itertools",
- "lazy_static",
"log",
"lru",
"minijinja",
diff --git a/rust/cubesql/cubesql/Cargo.toml b/rust/cubesql/cubesql/Cargo.toml
index 2e70b15dac9dc..9244a30743bad 100644
--- a/rust/cubesql/cubesql/Cargo.toml
+++ b/rust/cubesql/cubesql/Cargo.toml
@@ -16,7 +16,6 @@ thiserror = "1.0.50"
cubeclient = { path = "../cubeclient" }
pg-srv = { path = "../pg-srv" }
sqlparser = { git = 'https://github.com/cube-js/sqlparser-rs.git', rev = "6a54d27d3b75a04b9f9cbe309a83078aa54b32fd" }
-lazy_static = "1.4.0"
base64 = "0.13.0"
tokio = { version = "^1.35", features = ["full", "rt", "tracing"] }
serde = { version = "^1.0", features = ["derive"] }
diff --git a/rust/cubesql/cubesql/src/compile/engine/df/wrapper.rs b/rust/cubesql/cubesql/src/compile/engine/df/wrapper.rs
index 869528f7602cd..1c7edf39dc6a1 100644
--- a/rust/cubesql/cubesql/src/compile/engine/df/wrapper.rs
+++ b/rust/cubesql/cubesql/src/compile/engine/df/wrapper.rs
@@ -32,8 +32,16 @@ use itertools::Itertools;
use regex::{Captures, Regex};
use serde::{Deserialize, Serialize};
use std::{
- any::Any, cmp::min, collections::HashMap, convert::TryInto, fmt, future::Future, iter,
- pin::Pin, result, sync::Arc,
+ any::Any,
+ cmp::min,
+ collections::HashMap,
+ convert::TryInto,
+ fmt,
+ future::Future,
+ iter,
+ pin::Pin,
+ result,
+ sync::{Arc, LazyLock},
};
#[derive(Debug, Clone, Deserialize)]
@@ -160,12 +168,12 @@ impl SqlQuery {
}
pub fn finalize_query(&mut self, sql_templates: Arc<SqlTemplates>) -> Result<()> {
+ static REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\$(\d+)\$").unwrap());
+
let mut params = Vec::new();
let mut rendered_params = HashMap::new();
- let regex = Regex::new(r"\$(\d+)\$")
- .map_err(|e| DataFusionError::Execution(format!("Can't parse regex: {}", e)))?;
let mut res = Ok(());
- let replaced_sql = regex.replace_all(self.sql.as_str(), |c: &Captures<'_>| {
+ let replaced_sql = REGEX.replace_all(self.sql.as_str(), |c: &Captures<'_>| {
let param = c.get(1).map(|x| x.as_str());
match self.render_param(sql_templates.clone(), param, &rendered_params, params.len()) {
Ok((param_index, param, push_param)) => {
@@ -260,9 +268,7 @@ pub struct SqlGenerationResult {
pub request: TransportLoadRequestQuery,
}
-lazy_static! {
- static ref DATE_PART_REGEX: Regex = Regex::new("^[A-Za-z_ ]+$").unwrap();
-}
+static DATE_PART_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new("^[A-Za-z_ ]+$").unwrap());
macro_rules! generate_sql_for_timestamp {
(@generic $value:ident, $value_block:expr, $sql_generator:expr, $sql_query:expr) => {
@@ -950,8 +956,9 @@ impl CubeScanWrapperNode {
ungrouped_scan_node: Option<Arc<CubeScanNode>>,
subqueries: Arc<HashMap<String, String>>,
) -> result::Result<(Vec<AliasedColumn>, SqlQuery), CubeError> {
- let non_id_regex = Regex::new(r"[^a-zA-Z0-9_]")
- .map_err(|e| CubeError::internal(format!("Can't parse regex: {}", e)))?;
+ static NON_ID_REGEX: LazyLock<Regex> =
+ LazyLock::new(|| Regex::new(r"[^a-zA-Z0-9_]").unwrap());
+
let mut aliased_columns = Vec::new();
for original_expr in exprs {
let expr = if let Some(column_remapping) = column_remapping.as_ref() {
@@ -1001,7 +1008,7 @@ impl CubeScanWrapperNode {
let alias = if can_rename_columns {
let alias = expr_name(&expr, &schema)?;
- let mut truncated_alias = non_id_regex
+ let mut truncated_alias = NON_ID_REGEX
.replace_all(&alias, "_")
.trim_start_matches("_")
.to_lowercase();
diff --git a/rust/cubesql/cubesql/src/compile/engine/udf/common.rs b/rust/cubesql/cubesql/src/compile/engine/udf/common.rs
index 3be9c598e23fb..db8c96e279eda 100644
--- a/rust/cubesql/cubesql/src/compile/engine/udf/common.rs
+++ b/rust/cubesql/cubesql/src/compile/engine/udf/common.rs
@@ -1,4 +1,8 @@
-use std::{any::type_name, sync::Arc, thread};
+use std::{
+ any::type_name,
+ sync::{Arc, LazyLock},
+ thread,
+};
use chrono::{Datelike, Days, Duration, Months, NaiveDate, NaiveDateTime, NaiveTime};
use datafusion::{
@@ -3329,17 +3333,18 @@ pub fn create_current_setting_udf() -> ScalarUDF {
}
pub fn create_quote_ident_udf() -> ScalarUDF {
+ static RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"^[a-z_][a-z0-9_]*$").unwrap());
+
let fun = make_scalar_function(move |args: &[ArrayRef]| {
assert!(args.len() == 1);
let idents = downcast_string_arg!(args[0], "str", i32);
- let re = Regex::new(r"^[a-z_][a-z0-9_]*$").unwrap();
let result = idents
.iter()
.map(|ident| {
ident.map(|ident| {
- if re.is_match(ident) {
+ if RE.is_match(ident) {
return ident.to_string();
}
format!("\"{}\"", ident.replace("\"", "\"\""))
diff --git a/rust/cubesql/cubesql/src/compile/parser.rs b/rust/cubesql/cubesql/src/compile/parser.rs
index 53c324ed9bc10..58f79445ee7cc 100644
--- a/rust/cubesql/cubesql/src/compile/parser.rs
+++ b/rust/cubesql/cubesql/src/compile/parser.rs
@@ -1,4 +1,4 @@
-use std::collections::HashMap;
+use std::{collections::HashMap, sync::LazyLock};
use regex::Regex;
use sqlparser::{
@@ -36,9 +36,9 @@ impl Dialect for MySqlDialectWithBackTicks {
}
}
-lazy_static! {
- static ref SIGMA_WORKAROUND: Regex = Regex::new(r#"(?s)^\s*with\s+nsp\sas\s\(.*nspname\s=\s.*\),\s+tbl\sas\s\(.*relname\s=\s.*\).*select\s+attname.*from\spg_attribute.*$"#).unwrap();
-}
+static SIGMA_WORKAROUND: LazyLock<Regex> = LazyLock::new(|| {
+ Regex::new(r#"(?s)^\s*with\s+nsp\sas\s\(.*nspname\s=\s.*\),\s+tbl\sas\s\(.*relname\s=\s.*\).*select\s+attname.*from\spg_attribute.*$"#).unwrap()
+});
pub fn parse_sql_to_statements(
query: &String,
@@ -118,13 +118,18 @@ pub fn parse_sql_to_statements(
// Sigma Computing WITH query workaround
// TODO: remove workaround when subquery is supported in JOIN ON conditions
let query = if SIGMA_WORKAROUND.is_match(&query) {
- let relnamespace_re = Regex::new(r#"(?s)from\spg_catalog\.pg_class\s+where\s+relname\s=\s(?P<relname>'(?:[^']|'')+'|\$\d+)\s+and\s+relnamespace\s=\s\(select\soid\sfrom\snsp\)"#).unwrap();
- let relnamespace_replaced = relnamespace_re.replace(
+ static RELNAMESPACE_RE: LazyLock<Regex> = LazyLock::new(|| {
+ Regex::new(r#"(?s)from\spg_catalog\.pg_class\s+where\s+relname\s=\s(?P<relname>'(?:[^']|'')+'|\$\d+)\s+and\s+relnamespace\s=\s\(select\soid\sfrom\snsp\)"#).unwrap()
+ });
+ static ATTRELID_RE: LazyLock<Regex> = LazyLock::new(|| {
+ Regex::new(r#"(?s)left\sjoin\spg_description\son\s+attrelid\s=\sobjoid\sand\s+attnum\s=\sobjsubid\s+where\s+attnum\s>\s0\s+and\s+attrelid\s=\s\(select\soid\sfrom\stbl\)"#).unwrap()
+ });
+
+ let relnamespace_replaced = RELNAMESPACE_RE.replace(
&query,
"from pg_catalog.pg_class join nsp on relnamespace = nsp.oid where relname = $relname",
);
- let attrelid_re = Regex::new(r#"(?s)left\sjoin\spg_description\son\s+attrelid\s=\sobjoid\sand\s+attnum\s=\sobjsubid\s+where\s+attnum\s>\s0\s+and\s+attrelid\s=\s\(select\soid\sfrom\stbl\)"#).unwrap();
- let attrelid_replaced = attrelid_re.replace(&relnamespace_replaced, "left join pg_description on attrelid = objoid and attnum = objsubid join tbl on attrelid = tbl.oid where attnum > 0");
+ let attrelid_replaced = ATTRELID_RE.replace(&relnamespace_replaced, "left join pg_description on attrelid = objoid and attnum = objsubid join tbl on attrelid = tbl.oid where attnum > 0");
attrelid_replaced.to_string()
} else {
query
diff --git a/rust/cubesql/cubesql/src/compile/rewrite/converter.rs b/rust/cubesql/cubesql/src/compile/rewrite/converter.rs
index f60a36b3f537d..32fb6ec754778 100644
--- a/rust/cubesql/cubesql/src/compile/rewrite/converter.rs
+++ b/rust/cubesql/cubesql/src/compile/rewrite/converter.rs
@@ -59,7 +59,7 @@ use std::{
collections::{HashMap, HashSet},
env,
ops::Index,
- sync::Arc,
+ sync::{Arc, LazyLock},
};
pub use super::rewriter::CubeRunner;
@@ -170,8 +170,8 @@ macro_rules! add_plan_list_node {
}};
}
-lazy_static! {
- static ref EXCLUDED_PARAM_VALUES: HashSet<ScalarValue> = vec![
+static EXCLUDED_PARAM_VALUES: LazyLock<HashSet<ScalarValue>> = LazyLock::new(|| {
+ vec![
ScalarValue::Utf8(Some("second".to_string())),
ScalarValue::Utf8(Some("minute".to_string())),
ScalarValue::Utf8(Some("hour".to_string())),
@@ -182,8 +182,8 @@ lazy_static! {
]
.into_iter()
.chain((0..50).map(|i| ScalarValue::Int64(Some(i))))
- .collect();
-}
+ .collect()
+});
pub struct LogicalPlanToLanguageConverter {
graph: EGraph<LogicalPlanLanguage, LogicalPlanAnalysis>,
diff --git a/rust/cubesql/cubesql/src/compile/rewrite/rules/members.rs b/rust/cubesql/cubesql/src/compile/rewrite/rules/members.rs
index 291b7d32a352a..79fa094a995ea 100644
--- a/rust/cubesql/cubesql/src/compile/rewrite/rules/members.rs
+++ b/rust/cubesql/cubesql/src/compile/rewrite/rules/members.rs
@@ -46,7 +46,7 @@ use std::{
collections::{HashMap, HashSet},
fmt::Display,
ops::{Index, IndexMut},
- sync::Arc,
+ sync::{Arc, LazyLock},
};
pub struct MemberRules {
@@ -2857,27 +2857,30 @@ pub fn add_member_error(
]))
}
-lazy_static! {
- static ref STANDARD_GRANULARITIES_PARENTS: HashMap<&'static str, Vec<&'static str>> = [
- (
- "year",
- vec!["year", "quarter", "month", "day", "hour", "minute", "second"]
- ),
- (
- "quarter",
- vec!["quarter", "month", "day", "hour", "minute", "second"]
- ),
- ("month", vec!["month", "day", "hour", "minute", "second"]),
- ("week", vec!["week", "day", "hour", "minute", "second"]),
- ("day", vec!["day", "hour", "minute", "second"]),
- ("hour", vec!["hour", "minute", "second"]),
- ("minute", vec!["minute", "second"]),
- ("second", vec!["second"]),
- ]
- .iter()
- .cloned()
- .collect();
-}
+static STANDARD_GRANULARITIES_PARENTS: LazyLock<HashMap<&'static str, Vec<&'static str>>> =
+ LazyLock::new(|| {
+ [
+ (
+ "year",
+ vec![
+ "year", "quarter", "month", "day", "hour", "minute", "second",
+ ],
+ ),
+ (
+ "quarter",
+ vec!["quarter", "month", "day", "hour", "minute", "second"],
+ ),
+ ("month", vec!["month", "day", "hour", "minute", "second"]),
+ ("week", vec!["week", "day", "hour", "minute", "second"]),
+ ("day", vec!["day", "hour", "minute", "second"]),
+ ("hour", vec!["hour", "minute", "second"]),
+ ("minute", vec!["minute", "second"]),
+ ("second", vec!["second"]),
+ ]
+ .iter()
+ .cloned()
+ .collect()
+ });
pub fn min_granularity(granularity_a: &String, granularity_b: &String) -> Option<String> {
let granularity_a = granularity_a.to_lowercase();
diff --git a/rust/cubesql/cubesql/src/compile/test/mod.rs b/rust/cubesql/cubesql/src/compile/test/mod.rs
index 4b662f89ed15e..61d04bf76e3b7 100644
--- a/rust/cubesql/cubesql/src/compile/test/mod.rs
+++ b/rust/cubesql/cubesql/src/compile/test/mod.rs
@@ -915,14 +915,10 @@ impl TestContext {
}
}
-lazy_static! {
- pub static ref TEST_LOGGING_INITIALIZED: std::sync::RwLock<bool> =
- std::sync::RwLock::new(false);
-}
+static TEST_LOGGING_INITIALIZED: std::sync::Once = std::sync::Once::new();
pub fn init_testing_logger() {
- let mut initialized = TEST_LOGGING_INITIALIZED.write().unwrap();
- if !*initialized {
+ TEST_LOGGING_INITIALIZED.call_once(|| {
let log_level = log::Level::Trace;
let logger = simple_logger::SimpleLogger::new()
.with_level(log::Level::Error.to_level_filter())
@@ -933,8 +929,7 @@ pub fn init_testing_logger() {
log::set_boxed_logger(Box::new(logger)).unwrap();
log::set_max_level(log_level.to_level_filter());
- *initialized = true;
- }
+ });
}
pub async fn convert_select_to_query_plan_customized(
diff --git a/rust/cubesql/cubesql/src/config/mod.rs b/rust/cubesql/cubesql/src/config/mod.rs
index a314c5841c7f4..1f73528e00df1 100644
--- a/rust/cubesql/cubesql/src/config/mod.rs
+++ b/rust/cubesql/cubesql/src/config/mod.rs
@@ -237,11 +237,6 @@ impl ConfigObj for ConfigObjImpl {
}
}
-lazy_static! {
- pub static ref TEST_LOGGING_INITIALIZED: tokio::sync::RwLock<bool> =
- tokio::sync::RwLock::new(false);
-}
-
impl Config {
pub fn default() -> Config {
Config {
diff --git a/rust/cubesql/cubesql/src/lib.rs b/rust/cubesql/cubesql/src/lib.rs
index 1252d2bc77e63..ae7f986c9256c 100644
--- a/rust/cubesql/cubesql/src/lib.rs
+++ b/rust/cubesql/cubesql/src/lib.rs
@@ -15,8 +15,6 @@
// trace_macros!(false);
-#[macro_use]
-extern crate lazy_static;
extern crate core;
pub mod compile;
diff --git a/rust/cubesql/cubesql/src/sql/session.rs b/rust/cubesql/cubesql/src/sql/session.rs
index a265722e2956f..75a4541ffce85 100644
--- a/rust/cubesql/cubesql/src/sql/session.rs
+++ b/rust/cubesql/cubesql/src/sql/session.rs
@@ -3,7 +3,7 @@ use log::trace;
use rand::Rng;
use std::{
collections::HashMap,
- sync::{Arc, RwLock as RwLockSync, Weak},
+ sync::{Arc, LazyLock, RwLock as RwLockSync, Weak},
time::{Duration, SystemTime},
};
use tokio_util::sync::CancellationToken;
@@ -23,8 +23,6 @@ use crate::{
RWLockAsync,
};
-extern crate lazy_static;
-
#[derive(Debug, Clone)]
pub struct SessionProperties {
user: Option<String>,
@@ -37,10 +35,10 @@ impl SessionProperties {
}
}
-lazy_static! {
- static ref POSTGRES_DEFAULT_VARIABLES: DatabaseVariables = postgres_default_session_variables();
- static ref MYSQL_DEFAULT_VARIABLES: DatabaseVariables = mysql_default_session_variables();
-}
+static POSTGRES_DEFAULT_VARIABLES: LazyLock<DatabaseVariables> =
+ LazyLock::new(postgres_default_session_variables);
+static MYSQL_DEFAULT_VARIABLES: LazyLock<DatabaseVariables> =
+ LazyLock::new(mysql_default_session_variables);
#[derive(Debug)]
pub enum TransactionState {
diff --git a/rust/cubesql/cubesql/src/telemetry/mod.rs b/rust/cubesql/cubesql/src/telemetry/mod.rs
index 76eaa795e8c8b..5fc813889dc39 100644
--- a/rust/cubesql/cubesql/src/telemetry/mod.rs
+++ b/rust/cubesql/cubesql/src/telemetry/mod.rs
@@ -1,12 +1,14 @@
use crate::{compile::DatabaseProtocolDetails, sql::SessionState, CubeError};
use arc_swap::ArcSwap;
use log::{Level, LevelFilter};
-use std::{collections::HashMap, fmt::Debug, sync::Arc};
-
-lazy_static! {
- static ref REPORTER: ArcSwap<Box<dyn LogReporter>> =
- ArcSwap::from_pointee(Box::new(LocalReporter::new()));
-}
+use std::{
+ collections::HashMap,
+ fmt::Debug,
+ sync::{Arc, LazyLock},
+};
+
+static REPORTER: LazyLock<ArcSwap<Box<dyn LogReporter>>> =
+ LazyLock::new(|| ArcSwap::from_pointee(Box::new(LocalReporter::new())));
pub trait LogReporter: Send + Sync + Debug {
fn log(&self, event: String, properties: HashMap<String, String>, level: Level);
diff --git a/rust/cubesqlplanner/Cargo.lock b/rust/cubesqlplanner/Cargo.lock
index c5e3de6e44247..30082adcc325e 100644
--- a/rust/cubesqlplanner/Cargo.lock
+++ b/rust/cubesqlplanner/Cargo.lock
@@ -714,7 +714,6 @@ dependencies = [
"futures-util",
"hashbrown 0.14.5",
"itertools",
- "lazy_static",
"log",
"lru",
"minijinja",
@@ -726,7 +725,6 @@ dependencies = [
"regex",
"rust_decimal",
"serde",
- "serde_derive",
"serde_json",
"sha1_smol",
"sha2",
From 46b3a36936f0f00805144714f0dd87a3c50a5e0a Mon Sep 17 00:00:00 2001
From: Mikhail Cheshkov
Date: Fri, 6 Sep 2024 20:52:52 +0300
Subject: [PATCH 007/415] feat(cubesql): Support join with type coercion
(#8608)
* Bump datafusion
* Add tests for join with coercion execution
---
packages/cubejs-backend-native/Cargo.lock | 12 ++--
rust/cubenativeutils/Cargo.lock | 12 ++--
rust/cubesql/Cargo.lock | 12 ++--
rust/cubesql/cubesql/Cargo.toml | 2 +-
rust/cubesql/cubesql/src/compile/test/mod.rs | 2 +
...test_df_execution__join_with_coercion.snap | 9 +++
..._execution__triple_join_with_coercion.snap | 9 +++
.../src/compile/test/test_df_execution.rs | 63 +++++++++++++++++++
rust/cubesqlplanner/Cargo.lock | 12 ++--
rust/cubesqlplanner/cubesqlplanner/Cargo.toml | 2 +-
10 files changed, 109 insertions(+), 26 deletions(-)
create mode 100644 rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__join_with_coercion.snap
create mode 100644 rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__triple_join_with_coercion.snap
create mode 100644 rust/cubesql/cubesql/src/compile/test/test_df_execution.rs
diff --git a/packages/cubejs-backend-native/Cargo.lock b/packages/cubejs-backend-native/Cargo.lock
index 039ab5a8b2430..f3b911b4b5687 100644
--- a/packages/cubejs-backend-native/Cargo.lock
+++ b/packages/cubejs-backend-native/Cargo.lock
@@ -681,7 +681,7 @@ dependencies = [
[[package]]
name = "cube-ext"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"chrono",
@@ -838,7 +838,7 @@ checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
[[package]]
name = "datafusion"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -871,7 +871,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"ordered-float 2.10.1",
@@ -882,7 +882,7 @@ dependencies = [
[[package]]
name = "datafusion-data-access"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"async-trait",
"chrono",
@@ -895,7 +895,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -906,7 +906,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
diff --git a/rust/cubenativeutils/Cargo.lock b/rust/cubenativeutils/Cargo.lock
index 5dc499c1c36fb..b91bfac98f144 100644
--- a/rust/cubenativeutils/Cargo.lock
+++ b/rust/cubenativeutils/Cargo.lock
@@ -629,7 +629,7 @@ dependencies = [
[[package]]
name = "cube-ext"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"chrono",
@@ -734,7 +734,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -767,7 +767,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"ordered-float 2.10.1",
@@ -778,7 +778,7 @@ dependencies = [
[[package]]
name = "datafusion-data-access"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"async-trait",
"chrono",
@@ -791,7 +791,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -802,7 +802,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
diff --git a/rust/cubesql/Cargo.lock b/rust/cubesql/Cargo.lock
index 2b7ebfb538384..a832246ac7504 100644
--- a/rust/cubesql/Cargo.lock
+++ b/rust/cubesql/Cargo.lock
@@ -721,7 +721,7 @@ dependencies = [
[[package]]
name = "cube-ext"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"chrono",
@@ -851,7 +851,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -884,7 +884,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"ordered-float 2.10.0",
@@ -895,7 +895,7 @@ dependencies = [
[[package]]
name = "datafusion-data-access"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"async-trait",
"chrono",
@@ -908,7 +908,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -919,7 +919,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
diff --git a/rust/cubesql/cubesql/Cargo.toml b/rust/cubesql/cubesql/Cargo.toml
index 9244a30743bad..e88ffd9fc6346 100644
--- a/rust/cubesql/cubesql/Cargo.toml
+++ b/rust/cubesql/cubesql/Cargo.toml
@@ -10,7 +10,7 @@ homepage = "https://cube.dev"
[dependencies]
arc-swap = "1"
-datafusion = { git = 'https://github.com/cube-js/arrow-datafusion.git', rev = "400fa0d889a8a38ca69f36d5750dfb572fc6018e", default-features = false, features = ["regex_expressions", "unicode_expressions"] }
+datafusion = { git = 'https://github.com/cube-js/arrow-datafusion.git', rev = "dcf3e4aa26fd112043ef26fa4a78db5dbd443c86", default-features = false, features = ["regex_expressions", "unicode_expressions"] }
anyhow = "1.0"
thiserror = "1.0.50"
cubeclient = { path = "../cubeclient" }
diff --git a/rust/cubesql/cubesql/src/compile/test/mod.rs b/rust/cubesql/cubesql/src/compile/test/mod.rs
index 61d04bf76e3b7..a4f468b24df9c 100644
--- a/rust/cubesql/cubesql/src/compile/test/mod.rs
+++ b/rust/cubesql/cubesql/src/compile/test/mod.rs
@@ -31,6 +31,8 @@ pub mod rewrite_engine;
#[cfg(test)]
pub mod test_bi_workarounds;
#[cfg(test)]
+pub mod test_df_execution;
+#[cfg(test)]
pub mod test_introspection;
#[cfg(test)]
pub mod test_udfs;
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__join_with_coercion.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__join_with_coercion.snap
new file mode 100644
index 0000000000000..2c7e4b8928da7
--- /dev/null
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__join_with_coercion.snap
@@ -0,0 +1,9 @@
+---
+source: cubesql/src/compile/test/test_df_execution.rs
+expression: "execute_query(r#\"\n WITH\n t1 AS (\n SELECT 1::int2 AS i1\n ),\n t2 AS (\n SELECT 1::int4 AS i2\n )\n SELECT\n *\n FROM\n t1 LEFT JOIN t2 ON (t1.i1 = t2.i2)\n \"#.to_string(),\nDatabaseProtocol::PostgreSQL,).await.unwrap()"
+---
++----+----+
+| i1 | i2 |
++----+----+
+| 1 | 1 |
++----+----+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__triple_join_with_coercion.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__triple_join_with_coercion.snap
new file mode 100644
index 0000000000000..607514df95b8c
--- /dev/null
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_df_execution__triple_join_with_coercion.snap
@@ -0,0 +1,9 @@
+---
+source: cubesql/src/compile/test/test_df_execution.rs
+expression: "execute_query(r#\"\n WITH\n t1 AS (\n SELECT 1::int2 AS i1\n ),\n t2 AS (\n SELECT 1::int4 AS i2\n ),\n t3 AS (\n SELECT 1::int8 AS i3\n )\n SELECT\n *\n FROM\n t1\n LEFT JOIN t2 ON (t1.i1 = t2.i2)\n LEFT JOIN t3 ON (t3.i3 = t2.i2)\n \"#.to_string(),\nDatabaseProtocol::PostgreSQL,).await.unwrap()"
+---
++----+----+----+
+| i1 | i2 | i3 |
++----+----+----+
+| 1 | 1 | 1 |
++----+----+----+
diff --git a/rust/cubesql/cubesql/src/compile/test/test_df_execution.rs b/rust/cubesql/cubesql/src/compile/test/test_df_execution.rs
new file mode 100644
index 0000000000000..2558517b6efe6
--- /dev/null
+++ b/rust/cubesql/cubesql/src/compile/test/test_df_execution.rs
@@ -0,0 +1,63 @@
+//! Tests that validate that complex but self-contained queries can be executed correctly by DF
+
+use crate::compile::{
+ test::{execute_query, init_testing_logger},
+ DatabaseProtocol,
+};
+
+#[tokio::test]
+async fn test_join_with_coercion() {
+ init_testing_logger();
+
+ insta::assert_snapshot!(execute_query(
+ // language=PostgreSQL
+ r#"
+ WITH
+ t1 AS (
+ SELECT 1::int2 AS i1
+ ),
+ t2 AS (
+ SELECT 1::int4 AS i2
+ )
+ SELECT
+ *
+ FROM
+ t1 LEFT JOIN t2 ON (t1.i1 = t2.i2)
+ "#
+ .to_string(),
+ DatabaseProtocol::PostgreSQL,
+ )
+ .await
+ .unwrap());
+}
+
+#[tokio::test]
+async fn test_triple_join_with_coercion() {
+ init_testing_logger();
+
+ insta::assert_snapshot!(execute_query(
+ // language=PostgreSQL
+ r#"
+ WITH
+ t1 AS (
+ SELECT 1::int2 AS i1
+ ),
+ t2 AS (
+ SELECT 1::int4 AS i2
+ ),
+ t3 AS (
+ SELECT 1::int8 AS i3
+ )
+ SELECT
+ *
+ FROM
+ t1
+ LEFT JOIN t2 ON (t1.i1 = t2.i2)
+ LEFT JOIN t3 ON (t3.i3 = t2.i2)
+ "#
+ .to_string(),
+ DatabaseProtocol::PostgreSQL,
+ )
+ .await
+ .unwrap());
+}
diff --git a/rust/cubesqlplanner/Cargo.lock b/rust/cubesqlplanner/Cargo.lock
index 30082adcc325e..58acf427b2091 100644
--- a/rust/cubesqlplanner/Cargo.lock
+++ b/rust/cubesqlplanner/Cargo.lock
@@ -639,7 +639,7 @@ dependencies = [
[[package]]
name = "cube-ext"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"chrono",
@@ -758,7 +758,7 @@ dependencies = [
[[package]]
name = "datafusion"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -791,7 +791,7 @@ dependencies = [
[[package]]
name = "datafusion-common"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"arrow",
"ordered-float 2.10.1",
@@ -802,7 +802,7 @@ dependencies = [
[[package]]
name = "datafusion-data-access"
version = "1.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"async-trait",
"chrono",
@@ -815,7 +815,7 @@ dependencies = [
[[package]]
name = "datafusion-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
@@ -826,7 +826,7 @@ dependencies = [
[[package]]
name = "datafusion-physical-expr"
version = "7.0.0"
-source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=400fa0d889a8a38ca69f36d5750dfb572fc6018e#400fa0d889a8a38ca69f36d5750dfb572fc6018e"
+source = "git+https://github.com/cube-js/arrow-datafusion.git?rev=dcf3e4aa26fd112043ef26fa4a78db5dbd443c86#dcf3e4aa26fd112043ef26fa4a78db5dbd443c86"
dependencies = [
"ahash 0.7.8",
"arrow",
diff --git a/rust/cubesqlplanner/cubesqlplanner/Cargo.toml b/rust/cubesqlplanner/cubesqlplanner/Cargo.toml
index 5d4218eb4f086..309e341b5f4fd 100644
--- a/rust/cubesqlplanner/cubesqlplanner/Cargo.toml
+++ b/rust/cubesqlplanner/cubesqlplanner/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
-datafusion = { git = 'https://github.com/cube-js/arrow-datafusion.git', rev = "400fa0d889a8a38ca69f36d5750dfb572fc6018e", default-features = false, features = ["regex_expressions", "unicode_expressions"] }
+datafusion = { git = 'https://github.com/cube-js/arrow-datafusion.git', rev = "dcf3e4aa26fd112043ef26fa4a78db5dbd443c86", default-features = false, features = ["regex_expressions", "unicode_expressions"] }
tokio = { version = "^1.35", features = ["full", "rt", "tracing"] }
itertools = "0.10.2"
cubeclient = { path = "../../cubesql/cubeclient" }
From 2288c18bf30d1f3a3299b235fe9b4405d2cb7463 Mon Sep 17 00:00:00 2001
From: Mikhail Cheshkov
Date: Fri, 6 Sep 2024 21:40:35 +0300
Subject: [PATCH 008/415] feat(cubesql): Fill pg_description table with cube
 and member descriptions (#8618)
* Add description to CubeMetaTable and CubeMetaColumn
* Fill pg_description table with cube descriptions
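
pg_description rows are addressed by the triple (objoid, classoid, objsubid):
table comments use objsubid = 0, column comments use the 1-based column
number, and classoid points at the pg_class entry of the described table.
A minimal Rust sketch of that convention follows; `DescriptionRow` and the
helper names are hypothetical, introduced only for illustration:

    // pg_class has a fixed OID in pg_catalog; table and column comments both
    // reference the pg_class entry of the described table.
    const PG_CLASS_CLASS_OID: u32 = 1259;

    struct DescriptionRow {
        objoid: u32,     // OID of the described object (the cube's table OID)
        classoid: u32,   // OID of the system catalog the object appears in
        objsubid: i32,   // 0 for the table itself, 1-based column number otherwise
        description: String,
    }

    fn table_comment(table_oid: u32, text: &str) -> DescriptionRow {
        DescriptionRow {
            objoid: table_oid,
            classoid: PG_CLASS_CLASS_OID,
            objsubid: 0, // zero marks a comment on the table itself
            description: text.to_string(),
        }
    }

    fn column_comment(table_oid: u32, column_idx: usize, text: &str) -> DescriptionRow {
        DescriptionRow {
            objoid: table_oid,
            classoid: PG_CLASS_CLASS_OID,
            objsubid: column_idx as i32 + 1, // column subids start at 1
            description: text.to_string(),
        }
    }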
---
.../src/compile/engine/context_postgresql.rs | 6 +-
.../postgres/pg_description.rs | 44 ++++++++-
...mpile__tests__metabase_pg_class_query.snap | 40 ++++-----
...sts__pgcatalog_pgdescription_postgres.snap | 90 +++++++++++++++++--
...le__tests__thought_spot_table_columns.snap | 43 +++++----
rust/cubesql/cubesql/src/compile/test/mod.rs | 30 +++----
...trospection__excel_large_select_query.snap | 40 ++++-----
..._introspection__excel_select_db_query.snap | 18 ++--
...__sigma_computing_with_subquery_query.snap | 40 ++++-----
...pection__sqlalchemy_new_conname_query.snap | 40 ++++-----
...test_introspection__superset_subquery.snap | 40 ++++-----
...introspection__tableau_regclass_query.snap | 18 ++--
...tion__thoughtspot_table_introspection.snap | 40 ++++-----
rust/cubesql/cubesql/src/transport/ctx.rs | 4 +
rust/cubesql/cubesql/src/transport/ext.rs | 6 +-
15 files changed, 312 insertions(+), 187 deletions(-)
diff --git a/rust/cubesql/cubesql/src/compile/engine/context_postgresql.rs b/rust/cubesql/cubesql/src/compile/engine/context_postgresql.rs
index 68dbf97893dd9..2b1b227553d8c 100644
--- a/rust/cubesql/cubesql/src/compile/engine/context_postgresql.rs
+++ b/rust/cubesql/cubesql/src/compile/engine/context_postgresql.rs
@@ -327,7 +327,11 @@ impl DatabaseProtocol {
context.session_state.all_variables(),
)))
}
- "pg_description" => return Some(Arc::new(PgCatalogDescriptionProvider::new())),
+ "pg_description" => {
+ return Some(Arc::new(PgCatalogDescriptionProvider::new(
+ &context.meta.tables,
+ )))
+ }
"pg_constraint" => return Some(Arc::new(PgCatalogConstraintProvider::new())),
"pg_depend" => return Some(Arc::new(PgCatalogDependProvider::new())),
"pg_am" => return Some(Arc::new(PgCatalogAmProvider::new())),
diff --git a/rust/cubesql/cubesql/src/compile/engine/information_schema/postgres/pg_description.rs b/rust/cubesql/cubesql/src/compile/engine/information_schema/postgres/pg_description.rs
index bc9ecebdfed90..b3de6e0d25779 100644
--- a/rust/cubesql/cubesql/src/compile/engine/information_schema/postgres/pg_description.rs
+++ b/rust/cubesql/cubesql/src/compile/engine/information_schema/postgres/pg_description.rs
@@ -1,4 +1,4 @@
-use std::{any::Any, sync::Arc};
+use std::{any::Any, convert::TryFrom, sync::Arc};
use async_trait::async_trait;
@@ -14,10 +14,19 @@ use datafusion::{
physical_plan::{memory::MemoryExec, ExecutionPlan},
};
+use crate::{
+ compile::engine::information_schema::postgres::PG_CLASS_CLASS_OID, transport::CubeMetaTable,
+};
+
+/// See https://www.postgresql.org/docs/16/catalog-pg-description.html
struct PgCatalogDescriptionBuilder {
+ /// The OID of the object this description pertains to
objoid: UInt32Builder,
+ /// The OID of the system catalog this object appears in
classoid: UInt32Builder,
+ /// For a comment on a table column, this is the column number (the objoid and classoid refer to the table itself). For all other object types, this column is zero.
objsubid: Int32Builder,
+ /// Arbitrary text that serves as the description of this object
description: StringBuilder,
}
@@ -33,6 +42,23 @@ impl PgCatalogDescriptionBuilder {
}
}
+ fn add_table(&mut self, table_oid: u32, description: impl AsRef<str>) {
+ self.objoid.append_value(table_oid).unwrap();
+ self.classoid.append_value(PG_CLASS_CLASS_OID).unwrap();
+ self.objsubid.append_value(0).unwrap();
+ self.description.append_value(description).unwrap();
+ }
+
+ fn add_column(&mut self, table_oid: u32, column_idx: usize, description: impl AsRef<str>) {
+ self.objoid.append_value(table_oid).unwrap();
+ self.classoid.append_value(PG_CLASS_CLASS_OID).unwrap();
+ // Column subids start at 1
+ self.objsubid
+ .append_value(i32::try_from(column_idx).unwrap() + 1)
+ .unwrap();
+ self.description.append_value(description).unwrap();
+ }
+
fn finish(mut self) -> Vec<Arc<dyn Array>> {
let columns: Vec<Arc<dyn Array>> = vec![
Arc::new(self.objoid.finish()),
@@ -50,8 +76,20 @@ pub struct PgCatalogDescriptionProvider {
}
impl PgCatalogDescriptionProvider {
- pub fn new() -> Self {
- let builder = PgCatalogDescriptionBuilder::new();
+ pub fn new(tables: &[CubeMetaTable]) -> Self {
+ let mut builder = PgCatalogDescriptionBuilder::new();
+
+ for table in tables {
+ if let Some(description) = &table.description {
+ builder.add_table(table.oid, description);
+ }
+
+ for (idx, column) in table.columns.iter().enumerate() {
+ if let Some(description) = &column.description {
+ builder.add_column(table.oid, idx, description);
+ }
+ }
+ }
Self {
data: Arc::new(builder.finish()),
diff --git a/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__metabase_pg_class_query.snap b/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__metabase_pg_class_query.snap
index c8d28203015fe..d804b18e6e1a5 100644
--- a/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__metabase_pg_class_query.snap
+++ b/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__metabase_pg_class_query.snap
@@ -2,23 +2,23 @@
source: cubesql/src/compile/mod.rs
expression: "execute_query(\"\n SELECT *\n FROM (\n SELECT n.nspname,\n c.relname,\n a.attname,\n a.atttypid,\n a.attnotnull or (t.typtype = 'd' AND t.typnotnull) AS attnotnull,\n a.atttypmod,\n a.attlen,\n t.typtypmod,\n row_number() OVER (partition BY a.attrelid ORDER BY a.attnum) AS attnum,\n NULLIF(a.attidentity, '') AS attidentity,\n pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS adsrc,\n dsc.description,\n t.typbasetype,\n t.typtype\n FROM pg_catalog.pg_namespace n\n JOIN pg_catalog.pg_class c ON (c.relnamespace = n.oid)\n JOIN pg_catalog.pg_attribute a ON (a.attrelid=c.oid)\n JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid)\n LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum)\n LEFT JOIN pg_catalog.pg_description dsc ON (c.oid=dsc.objoid AND a.attnum = dsc.objsubid)\n LEFT JOIN pg_catalog.pg_class dc ON (dc.oid=dsc.classoid AND dc.relname='pg_class')\n LEFT JOIN pg_catalog.pg_namespace dn ON (dc.relnamespace=dn.oid AND dn.nspname='pg_catalog')\n WHERE c.relkind IN ('r', 'p', 'v', 'f', 'm') AND a.attnum > 0 AND NOT a.attisdropped AND n.nspname LIKE 'public' AND c.relname LIKE 'KibanaSampleDataEcommerce') c\n WHERE true\n ORDER BY nspname, c.relname, attnum;\n \".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+---------+---------------------------+--------------------+----------+------------+-----------+--------+-----------+--------+-------------+-------+-------------+-------------+---------+
-| nspname | relname | attname | atttypid | attnotnull | atttypmod | attlen | typtypmod | attnum | attidentity | adsrc | description | typbasetype | typtype |
-+---------+---------------------------+--------------------+----------+------------+-----------+--------+-----------+--------+-------------+-------+-------------+-------------+---------+
-| public | KibanaSampleDataEcommerce | count | 20 | true | -1 | 8 | -1 | 1 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | maxPrice | 1700 | true | -1 | -1 | -1 | 2 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | sumPrice | 1700 | true | -1 | -1 | -1 | 3 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | minPrice | 1700 | true | -1 | -1 | -1 | 4 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | avgPrice | 1700 | true | -1 | -1 | -1 | 5 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | countDistinct | 20 | true | -1 | 8 | -1 | 6 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | order_date | 1114 | false | -1 | 8 | -1 | 7 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | last_mod | 1114 | false | -1 | 8 | -1 | 8 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | customer_gender | 25 | false | -1 | -1 | -1 | 9 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | notes | 25 | false | -1 | -1 | -1 | 10 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | taxful_total_price | 1700 | false | -1 | -1 | -1 | 11 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | has_subscription | 16 | false | -1 | 1 | -1 | 12 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | is_male | 16 | true | -1 | 1 | -1 | 13 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | is_female | 16 | true | -1 | 1 | -1 | 14 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | __user | 25 | false | -1 | -1 | -1 | 15 | NULL | NULL | NULL | 0 | b |
-| public | KibanaSampleDataEcommerce | __cubeJoinField | 25 | false | -1 | -1 | -1 | 16 | NULL | NULL | NULL | 0 | b |
-+---------+---------------------------+--------------------+----------+------------+-----------+--------+-----------+--------+-------------+-------+-------------+-------------+---------+
++---------+---------------------------+--------------------+----------+------------+-----------+--------+-----------+--------+-------------+-------+-----------------------------------------------+-------------+---------+
+| nspname | relname | attname | atttypid | attnotnull | atttypmod | attlen | typtypmod | attnum | attidentity | adsrc | description | typbasetype | typtype |
++---------+---------------------------+--------------------+----------+------------+-----------+--------+-----------+--------+-------------+-------+-----------------------------------------------+-------------+---------+
+| public | KibanaSampleDataEcommerce | count | 20 | true | -1 | 8 | -1 | 1 | NULL | NULL | Events count | 0 | b |
+| public | KibanaSampleDataEcommerce | maxPrice | 1700 | true | -1 | -1 | -1 | 2 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | sumPrice | 1700 | true | -1 | -1 | -1 | 3 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | minPrice | 1700 | true | -1 | -1 | -1 | 4 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | avgPrice | 1700 | true | -1 | -1 | -1 | 5 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | countDistinct | 20 | true | -1 | 8 | -1 | 6 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | order_date | 1114 | false | -1 | 8 | -1 | 7 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | last_mod | 1114 | false | -1 | 8 | -1 | 8 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | customer_gender | 25 | false | -1 | -1 | -1 | 9 | NULL | NULL | Customer gender | 0 | b |
+| public | KibanaSampleDataEcommerce | notes | 25 | false | -1 | -1 | -1 | 10 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | taxful_total_price | 1700 | false | -1 | -1 | -1 | 11 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | has_subscription | 16 | false | -1 | 1 | -1 | 12 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | is_male | 16 | true | -1 | 1 | -1 | 13 | NULL | NULL | Male users segment | 0 | b |
+| public | KibanaSampleDataEcommerce | is_female | 16 | true | -1 | 1 | -1 | 14 | NULL | NULL | NULL | 0 | b |
+| public | KibanaSampleDataEcommerce | __user | 25 | false | -1 | -1 | -1 | 15 | NULL | NULL | Virtual column for security context switching | 0 | b |
+| public | KibanaSampleDataEcommerce | __cubeJoinField | 25 | false | -1 | -1 | -1 | 16 | NULL | NULL | Virtual column for joining cubes | 0 | b |
++---------+---------------------------+--------------------+----------+------------+-----------+--------+-----------+--------+-------------+-------+-----------------------------------------------+-------------+---------+
diff --git a/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__pgcatalog_pgdescription_postgres.snap b/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__pgcatalog_pgdescription_postgres.snap
index f33ec88a25943..a8c95f329dd65 100644
--- a/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__pgcatalog_pgdescription_postgres.snap
+++ b/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__pgcatalog_pgdescription_postgres.snap
@@ -1,8 +1,88 @@
---
source: cubesql/src/compile/mod.rs
-expression: "execute_query(\"SELECT * FROM pg_catalog.pg_description\".to_string(),\n DatabaseProtocol::PostgreSQL).await?"
+expression: "execute_query(\"SELECT * FROM pg_catalog.pg_description\".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+--------+----------+----------+-------------+
-| objoid | classoid | objsubid | description |
-+--------+----------+----------+-------------+
-+--------+----------+----------+-------------+
++--------+----------+----------+-------------------------------------------------------+
+| objoid | classoid | objsubid | description |
++--------+----------+----------+-------------------------------------------------------+
+| 18000 | 1259 | 0 | Sample data for tracking eCommerce orders from Kibana |
+| 18000 | 1259 | 1 | Events count |
+| 18000 | 1259 | 9 | Customer gender |
+| 18000 | 1259 | 13 | Male users segment |
+| 18000 | 1259 | 15 | Virtual column for security context switching |
+| 18000 | 1259 | 16 | Virtual column for joining cubes |
+| 18019 | 1259 | 6 | Virtual column for security context switching |
+| 18019 | 1259 | 7 | Virtual column for joining cubes |
+| 18029 | 1259 | 2 | Virtual column for security context switching |
+| 18029 | 1259 | 3 | Virtual column for joining cubes |
+| 18035 | 1259 | 206 | Virtual column for security context switching |
+| 18035 | 1259 | 207 | Virtual column for joining cubes |
+| 18245 | 1259 | 0 | Test cube with a little bit of everything |
+| 18245 | 1259 | 1 | Test number measure 0 |
+| 18245 | 1259 | 2 | Test max(string) measure 0 |
+| 18245 | 1259 | 3 | Test max(time) measure 0 |
+| 18245 | 1259 | 4 | Test number measure 1 |
+| 18245 | 1259 | 5 | Test max(string) measure 1 |
+| 18245 | 1259 | 6 | Test max(time) measure 1 |
+| 18245 | 1259 | 7 | Test number measure 2 |
+| 18245 | 1259 | 8 | Test max(string) measure 2 |
+| 18245 | 1259 | 9 | Test max(time) measure 2 |
+| 18245 | 1259 | 10 | Test number measure 3 |
+| 18245 | 1259 | 11 | Test max(string) measure 3 |
+| 18245 | 1259 | 12 | Test max(time) measure 3 |
+| 18245 | 1259 | 13 | Test number measure 4 |
+| 18245 | 1259 | 14 | Test max(string) measure 4 |
+| 18245 | 1259 | 15 | Test max(time) measure 4 |
+| 18245 | 1259 | 16 | Test number measure 5 |
+| 18245 | 1259 | 17 | Test max(string) measure 5 |
+| 18245 | 1259 | 18 | Test max(time) measure 5 |
+| 18245 | 1259 | 19 | Test number measure 6 |
+| 18245 | 1259 | 20 | Test max(string) measure 6 |
+| 18245 | 1259 | 21 | Test max(time) measure 6 |
+| 18245 | 1259 | 22 | Test number measure 7 |
+| 18245 | 1259 | 23 | Test max(string) measure 7 |
+| 18245 | 1259 | 24 | Test max(time) measure 7 |
+| 18245 | 1259 | 25 | Test number measure 8 |
+| 18245 | 1259 | 26 | Test max(string) measure 8 |
+| 18245 | 1259 | 27 | Test max(time) measure 8 |
+| 18245 | 1259 | 28 | Test number measure 9 |
+| 18245 | 1259 | 29 | Test max(string) measure 9 |
+| 18245 | 1259 | 30 | Test max(time) measure 9 |
+| 18245 | 1259 | 31 | Test count measure |
+| 18245 | 1259 | 32 | Test maxPrice measure |
+| 18245 | 1259 | 33 | Test minPrice measure |
+| 18245 | 1259 | 34 | Test avgPrice measure |
+| 18245 | 1259 | 35 | Test countDistinct measure |
+| 18245  | 1259     | 36       | Test numeric dimension 0                              |
+| 18245  | 1259     | 37       | Test string dimension 0                               |
+| 18245  | 1259     | 38       | Test time dimension 0                                 |
+| 18245  | 1259     | 39       | Test numeric dimension 1                              |
+| 18245  | 1259     | 40       | Test string dimension 1                               |
+| 18245  | 1259     | 41       | Test time dimension 1                                 |
+| 18245  | 1259     | 42       | Test numeric dimension 2                              |
+| 18245  | 1259     | 43       | Test string dimension 2                               |
+| 18245  | 1259     | 44       | Test time dimension 2                                 |
+| 18245  | 1259     | 45       | Test numeric dimension 3                              |
+| 18245  | 1259     | 46       | Test string dimension 3                               |
+| 18245  | 1259     | 47       | Test time dimension 3                                 |
+| 18245  | 1259     | 48       | Test numeric dimension 4                              |
+| 18245  | 1259     | 49       | Test string dimension 4                               |
+| 18245  | 1259     | 50       | Test time dimension 4                                 |
+| 18245  | 1259     | 51       | Test numeric dimension 5                              |
+| 18245  | 1259     | 52       | Test string dimension 5                               |
+| 18245  | 1259     | 53       | Test time dimension 5                                 |
+| 18245  | 1259     | 54       | Test numeric dimension 6                              |
+| 18245  | 1259     | 55       | Test string dimension 6                               |
+| 18245  | 1259     | 56       | Test time dimension 6                                 |
+| 18245  | 1259     | 57       | Test numeric dimension 7                              |
+| 18245  | 1259     | 58       | Test string dimension 7                               |
+| 18245  | 1259     | 59       | Test time dimension 7                                 |
+| 18245  | 1259     | 60       | Test numeric dimension 8                              |
+| 18245  | 1259     | 61       | Test string dimension 8                               |
+| 18245  | 1259     | 62       | Test time dimension 8                                 |
+| 18245  | 1259     | 63       | Test numeric dimension 9                              |
+| 18245  | 1259     | 64       | Test string dimension 9                               |
+| 18245  | 1259     | 65       | Test time dimension 9                                 |
+| 18245 | 1259 | 66 | Virtual column for security context switching |
+| 18245 | 1259 | 67 | Virtual column for joining cubes |
++--------+----------+----------+-------------------------------------------------------+
diff --git a/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__thought_spot_table_columns.snap b/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__thought_spot_table_columns.snap
index 6e17dd07d170d..ebec7d37ee693 100644
--- a/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__thought_spot_table_columns.snap
+++ b/rust/cubesql/cubesql/src/compile/snapshots/cubesql__compile__tests__thought_spot_table_columns.snap
@@ -1,25 +1,24 @@
---
source: cubesql/src/compile/mod.rs
-assertion_line: 6432
-expression: "execute_query(\"SELECT * FROM ( SELECT current_database() AS TABLE_CAT, n.nspname AS TABLE_SCHEM, c.relname as TABLE_NAME , a.attname as COLUMN_NAME, CAST(case typname when 'text' THEN 12 when 'bit' THEN -7 when 'bool' THEN -7 when 'boolean' THEN -7 when 'varchar' THEN 12 when 'character varying' THEN 12 when 'char' THEN 1 when '\\\"char\\\"' THEN 1 when 'character' THEN 1 when 'nchar' THEN 12 when 'bpchar' THEN 1 when 'nvarchar' THEN 12 when 'date' THEN 91 when 'timestamp' THEN 93 when 'timestamp without time zone' THEN 93 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 4 when 'int' THEN 4 when 'int4' THEN 4 when 'bigint' THEN -5 when 'int8' THEN -5 when 'decimal' THEN 3 when 'real' THEN 7 when 'float4' THEN 7 when 'double precision' THEN 8 when 'float8' THEN 8 when 'float' THEN 6 when 'numeric' THEN 2 when '_float4' THEN 2003 when 'timestamptz' THEN 2014 when 'timestamp with time zone' THEN 2014 when '_aclitem' THEN 2003 when '_text' THEN 2003 when 'bytea' THEN -2 when 'oid' THEN -5 when 'name' THEN 12 when '_int4' THEN 2003 when '_int2' THEN 2003 when 'ARRAY' THEN 2003 when 'geometry' THEN -4 when 'super' THEN -16 else 1111 END as SMALLINT) AS DATA_TYPE, t.typname as TYPE_NAME, case typname when 'int4' THEN 10 when 'bit' THEN 1 when 'bool' THEN 1 when 'varchar' THEN atttypmod -4 when 'character varying' THEN atttypmod -4 when 'char' THEN atttypmod -4 when 'character' THEN atttypmod -4 when 'nchar' THEN atttypmod -4 when 'bpchar' THEN atttypmod -4 when 'nvarchar' THEN atttypmod -4 when 'date' THEN 13 when 'timestamp' THEN 29 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 10 when 'int' THEN 10 when 'int4' THEN 10 when 'bigint' THEN 19 when 'int8' THEN 19 when 'decimal' then (atttypmod - 4) >> 16 when 'real' THEN 8 when 'float4' THEN 8 when 'double precision' THEN 17 when 'float8' THEN 17 when 'float' THEN 17 when 'numeric' THEN (atttypmod - 4) >> 16 when '_float4' THEN 8 when 'timestamptz' THEN 35 when 'oid' THEN 10 when '_int4' THEN 10 when '_int2' THEN 5 when 'geometry' THEN NULL when 'super' THEN NULL else 2147483647 end as COLUMN_SIZE , null as BUFFER_LENGTH , case typname when 'float4' then 8 when 'float8' then 17 when 'numeric' then (atttypmod - 4) & 65535 when 'timestamp' then 6 when 'geometry' then NULL when 'super' then NULL else 0 end as DECIMAL_DIGITS, 10 AS NUM_PREC_RADIX , case a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) when 'false' then 1 when NULL then 2 else 0 end AS NULLABLE , dsc.description as REMARKS , pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS COLUMN_DEF, CAST(case typname when 'text' THEN 12 when 'bit' THEN -7 when 'bool' THEN -7 when 'boolean' THEN -7 when 'varchar' THEN 12 when 'character varying' THEN 12 when '\\\"char\\\"' THEN 1 when 'char' THEN 1 when 'character' THEN 1 when 'nchar' THEN 1 when 'bpchar' THEN 1 when 'nvarchar' THEN 12 when 'date' THEN 91 when 'timestamp' THEN 93 when 'timestamp without time zone' THEN 93 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 4 when 'int' THEN 4 when 'int4' THEN 4 when 'bigint' THEN -5 when 'int8' THEN -5 when 'decimal' THEN 3 when 'real' THEN 7 when 'float4' THEN 7 when 'double precision' THEN 8 when 'float8' THEN 8 when 'float' THEN 6 when 'numeric' THEN 2 when '_float4' THEN 2003 when 'timestamptz' THEN 2014 when 'timestamp with time zone' THEN 2014 when '_aclitem' THEN 2003 when '_text' THEN 2003 when 'bytea' THEN -2 when 'oid' THEN -5 when 'name' THEN 12 when '_int4' THEN 2003 when '_int2' THEN 2003 when 'ARRAY' THEN 2003 when 'geometry' THEN 
-4 when 'super' THEN -16 else 1111 END as SMALLINT) AS SQL_DATA_TYPE, CAST(NULL AS SMALLINT) as SQL_DATETIME_SUB , case typname when 'int4' THEN 10 when 'bit' THEN 1 when 'bool' THEN 1 when 'varchar' THEN atttypmod -4 when 'character varying' THEN atttypmod -4 when 'char' THEN atttypmod -4 when 'character' THEN atttypmod -4 when 'nchar' THEN atttypmod -4 when 'bpchar' THEN atttypmod -4 when 'nvarchar' THEN atttypmod -4 when 'date' THEN 13 when 'timestamp' THEN 29 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 10 when 'int' THEN 10 when 'int4' THEN 10 when 'bigint' THEN 19 when 'int8' THEN 19 when 'decimal' then ((atttypmod - 4) >> 16) & 65535 when 'real' THEN 8 when 'float4' THEN 8 when 'double precision' THEN 17 when 'float8' THEN 17 when 'float' THEN 17 when 'numeric' THEN ((atttypmod - 4) >> 16) & 65535 when '_float4' THEN 8 when 'timestamptz' THEN 35 when 'oid' THEN 10 when '_int4' THEN 10 when '_int2' THEN 5 when 'geometry' THEN NULL when 'super' THEN NULL else 2147483647 end as CHAR_OCTET_LENGTH , a.attnum AS ORDINAL_POSITION, case a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) when 'false' then 'YES' when NULL then '' else 'NO' end AS IS_NULLABLE, null as SCOPE_CATALOG , null as SCOPE_SCHEMA , null as SCOPE_TABLE, t.typbasetype AS SOURCE_DATA_TYPE , CASE WHEN left(pg_catalog.pg_get_expr(def.adbin, def.adrelid), 16) = 'default_identity' THEN 'YES' ELSE 'NO' END AS IS_AUTOINCREMENT, IS_AUTOINCREMENT AS IS_GENERATEDCOLUMN FROM pg_catalog.pg_namespace n JOIN pg_catalog.pg_class c ON (c.relnamespace = n.oid) JOIN pg_catalog.pg_attribute a ON (a.attrelid=c.oid) JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum) LEFT JOIN pg_catalog.pg_description dsc ON (c.oid=dsc.objoid AND a.attnum = dsc.objsubid) LEFT JOIN pg_catalog.pg_class dc ON (dc.oid=dsc.classoid AND dc.relname='pg_class') LEFT JOIN pg_catalog.pg_namespace dn ON (dc.relnamespace=dn.oid AND dn.nspname='pg_catalog') WHERE a.attnum > 0 AND NOT a.attisdropped AND current_database() = 'cubedb' AND n.nspname LIKE 'public' AND c.relname LIKE 'KibanaSampleDataEcommerce' ORDER BY TABLE_SCHEM,c.relname,attnum ) UNION ALL SELECT current_database()::VARCHAR(128) AS TABLE_CAT, schemaname::varchar(128) AS table_schem, tablename::varchar(128) AS table_name, columnname::varchar(128) AS column_name, CAST(CASE columntype_rep WHEN 'text' THEN 12 WHEN 'bit' THEN -7 WHEN 'bool' THEN -7 WHEN 'boolean' THEN -7 WHEN 'varchar' THEN 12 WHEN 'character varying' THEN 12 WHEN 'char' THEN 1 WHEN 'character' THEN 1 WHEN 'nchar' THEN 1 WHEN 'bpchar' THEN 1 WHEN 'nvarchar' THEN 12 WHEN '\\\"char\\\"' THEN 1 WHEN 'date' THEN 91 WHEN 'timestamp' THEN 93 WHEN 'timestamp without time zone' THEN 93 WHEN 'timestamp with time zone' THEN 2014 WHEN 'smallint' THEN 5 WHEN 'int2' THEN 5 WHEN 'integer' THEN 4 WHEN 'int' THEN 4 WHEN 'int4' THEN 4 WHEN 'bigint' THEN -5 WHEN 'int8' THEN -5 WHEN 'decimal' THEN 3 WHEN 'real' THEN 7 WHEN 'float4' THEN 7 WHEN 'double precision' THEN 8 WHEN 'float8' THEN 8 WHEN 'float' THEN 6 WHEN 'numeric' THEN 2 WHEN 'timestamptz' THEN 2014 WHEN 'bytea' THEN -2 WHEN 'oid' THEN -5 WHEN 'name' THEN 12 WHEN 'ARRAY' THEN 2003 WHEN 'geometry' THEN -4 WHEN 'super' THEN -16 ELSE 1111 END AS SMALLINT) AS DATA_TYPE, COALESCE(NULL,CASE columntype WHEN 'boolean' THEN 'bool' WHEN 'character varying' THEN 'varchar' WHEN '\\\"char\\\"' THEN 'char' WHEN 'smallint' THEN 'int2' WHEN 'integer' THEN 'int4'WHEN 'bigint' THEN 'int8' WHEN 'real' THEN 'float4' 
WHEN 'double precision' THEN 'float8' WHEN 'timestamp without time zone' THEN 'timestamp' WHEN 'timestamp with time zone' THEN 'timestamptz' ELSE columntype END) AS TYPE_NAME, CASE columntype_rep WHEN 'int4' THEN 10 WHEN 'bit' THEN 1 WHEN 'bool' THEN 1WHEN 'boolean' THEN 1WHEN 'varchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'character varying' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'char' THEN regexp_substr (columntype,'[0-9]+',4)::INTEGER WHEN 'character' THEN regexp_substr (columntype,'[0-9]+',4)::INTEGER WHEN 'nchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'bpchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'nvarchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'date' THEN 13 WHEN 'timestamp' THEN 29 WHEN 'timestamp without time zone' THEN 29 WHEN 'smallint' THEN 5 WHEN 'int2' THEN 5 WHEN 'integer' THEN 10 WHEN 'int' THEN 10 WHEN 'int4' THEN 10 WHEN 'bigint' THEN 19 WHEN 'int8' THEN 19 WHEN 'decimal' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'real' THEN 8 WHEN 'float4' THEN 8 WHEN 'double precision' THEN 17 WHEN 'float8' THEN 17 WHEN 'float' THEN 17WHEN 'numeric' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN '_float4' THEN 8 WHEN 'timestamptz' THEN 35 WHEN 'timestamp with time zone' THEN 35 WHEN 'oid' THEN 10 WHEN '_int4' THEN 10 WHEN '_int2' THEN 5 WHEN 'geometry' THEN NULL WHEN 'super' THEN NULL ELSE 2147483647 END AS COLUMN_SIZE, NULL AS BUFFER_LENGTH, CASE columntype WHEN 'real' THEN 8 WHEN 'float4' THEN 8 WHEN 'double precision' THEN 17 WHEN 'float8' THEN 17 WHEN 'timestamp' THEN 6 WHEN 'timestamp without time zone' THEN 6 WHEN 'geometry' THEN NULL WHEN 'super' THEN NULL ELSE 0 END AS DECIMAL_DIGITS, 10 AS NUM_PREC_RADIX, NULL AS NULLABLE, NULL AS REMARKS, NULL AS COLUMN_DEF, CAST(CASE columntype_rep WHEN 'text' THEN 12 WHEN 'bit' THEN -7 WHEN 'bool' THEN -7 WHEN 'boolean' THEN -7 WHEN 'varchar' THEN 12 WHEN 'character varying' THEN 12 WHEN 'char' THEN 1 WHEN 'character' THEN 1 WHEN 'nchar' THEN 12 WHEN 'bpchar' THEN 1 WHEN 'nvarchar' THEN 12 WHEN '\\\"char\\\"' THEN 1 WHEN 'date' THEN 91 WHEN 'timestamp' THEN 93 WHEN 'timestamp without time zone' THEN 93 WHEN 'timestamp with time zone' THEN 2014 WHEN 'smallint' THEN 5 WHEN 'int2' THEN 5 WHEN 'integer' THEN 4 WHEN 'int' THEN 4 WHEN 'int4' THEN 4 WHEN 'bigint' THEN -5 WHEN 'int8' THEN -5 WHEN 'decimal' THEN 3 WHEN 'real' THEN 7 WHEN 'float4' THEN 7 WHEN 'double precision' THEN 8 WHEN 'float8' THEN 8 WHEN 'float' THEN 6 WHEN 'numeric' THEN 2 WHEN 'timestamptz' THEN 2014 WHEN 'bytea' THEN -2 WHEN 'oid' THEN -5 WHEN 'name' THEN 12 WHEN 'ARRAY' THEN 2003 WHEN 'geometry' THEN -4 WHEN 'super' THEN -4 ELSE 1111 END AS SMALLINT) AS SQL_DATA_TYPE, CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB, CASE WHEN LEFT (columntype,7) = 'varchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN LEFT (columntype,4) = 'char' THEN regexp_substr (columntype,'[0-9]+',4)::INTEGER WHEN columntype = 'string' THEN 16383 ELSE NULL END AS CHAR_OCTET_LENGTH, columnnum AS ORDINAL_POSITION, NULL AS IS_NULLABLE, NULL AS SCOPE_CATALOG, NULL AS SCOPE_SCHEMA, NULL AS SCOPE_TABLE, NULL AS SOURCE_DATA_TYPE, 'NO' AS IS_AUTOINCREMENT, 'NO' as IS_GENERATEDCOLUMN FROM (select lbv_cols.schemaname, lbv_cols.tablename, lbv_cols.columnname,REGEXP_REPLACE(REGEXP_REPLACE(lbv_cols.columntype,'\\\\\\\\(.*\\\\\\\\)'),'^_.+','ARRAY') as columntype_rep,columntype, lbv_cols.columnnum from pg_get_late_binding_view_cols() lbv_cols( schemaname name, tablename name, columnname name, 
columntype text, columnnum int)) lbv_columns WHERE true AND current_database() = 'cubedb' AND schemaname LIKE 'public' AND tablename LIKE 'KibanaSampleDataEcommerce';\".to_string(),\n DatabaseProtocol::PostgreSQL).await?"
+expression: "execute_query(\"SELECT * FROM ( SELECT current_database() AS TABLE_CAT, n.nspname AS TABLE_SCHEM, c.relname as TABLE_NAME , a.attname as COLUMN_NAME, CAST(case typname when 'text' THEN 12 when 'bit' THEN -7 when 'bool' THEN -7 when 'boolean' THEN -7 when 'varchar' THEN 12 when 'character varying' THEN 12 when 'char' THEN 1 when '\\\"char\\\"' THEN 1 when 'character' THEN 1 when 'nchar' THEN 12 when 'bpchar' THEN 1 when 'nvarchar' THEN 12 when 'date' THEN 91 when 'timestamp' THEN 93 when 'timestamp without time zone' THEN 93 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 4 when 'int' THEN 4 when 'int4' THEN 4 when 'bigint' THEN -5 when 'int8' THEN -5 when 'decimal' THEN 3 when 'real' THEN 7 when 'float4' THEN 7 when 'double precision' THEN 8 when 'float8' THEN 8 when 'float' THEN 6 when 'numeric' THEN 2 when '_float4' THEN 2003 when 'timestamptz' THEN 2014 when 'timestamp with time zone' THEN 2014 when '_aclitem' THEN 2003 when '_text' THEN 2003 when 'bytea' THEN -2 when 'oid' THEN -5 when 'name' THEN 12 when '_int4' THEN 2003 when '_int2' THEN 2003 when 'ARRAY' THEN 2003 when 'geometry' THEN -4 when 'super' THEN -16 else 1111 END as SMALLINT) AS DATA_TYPE, t.typname as TYPE_NAME, case typname when 'int4' THEN 10 when 'bit' THEN 1 when 'bool' THEN 1 when 'varchar' THEN atttypmod -4 when 'character varying' THEN atttypmod -4 when 'char' THEN atttypmod -4 when 'character' THEN atttypmod -4 when 'nchar' THEN atttypmod -4 when 'bpchar' THEN atttypmod -4 when 'nvarchar' THEN atttypmod -4 when 'date' THEN 13 when 'timestamp' THEN 29 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 10 when 'int' THEN 10 when 'int4' THEN 10 when 'bigint' THEN 19 when 'int8' THEN 19 when 'decimal' then (atttypmod - 4) >> 16 when 'real' THEN 8 when 'float4' THEN 8 when 'double precision' THEN 17 when 'float8' THEN 17 when 'float' THEN 17 when 'numeric' THEN (atttypmod - 4) >> 16 when '_float4' THEN 8 when 'timestamptz' THEN 35 when 'oid' THEN 10 when '_int4' THEN 10 when '_int2' THEN 5 when 'geometry' THEN NULL when 'super' THEN NULL else 2147483647 end as COLUMN_SIZE , null as BUFFER_LENGTH , case typname when 'float4' then 8 when 'float8' then 17 when 'numeric' then (atttypmod - 4) & 65535 when 'timestamp' then 6 when 'geometry' then NULL when 'super' then NULL else 0 end as DECIMAL_DIGITS, 10 AS NUM_PREC_RADIX , case a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) when 'false' then 1 when NULL then 2 else 0 end AS NULLABLE , dsc.description as REMARKS , pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS COLUMN_DEF, CAST(case typname when 'text' THEN 12 when 'bit' THEN -7 when 'bool' THEN -7 when 'boolean' THEN -7 when 'varchar' THEN 12 when 'character varying' THEN 12 when '\\\"char\\\"' THEN 1 when 'char' THEN 1 when 'character' THEN 1 when 'nchar' THEN 1 when 'bpchar' THEN 1 when 'nvarchar' THEN 12 when 'date' THEN 91 when 'timestamp' THEN 93 when 'timestamp without time zone' THEN 93 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 4 when 'int' THEN 4 when 'int4' THEN 4 when 'bigint' THEN -5 when 'int8' THEN -5 when 'decimal' THEN 3 when 'real' THEN 7 when 'float4' THEN 7 when 'double precision' THEN 8 when 'float8' THEN 8 when 'float' THEN 6 when 'numeric' THEN 2 when '_float4' THEN 2003 when 'timestamptz' THEN 2014 when 'timestamp with time zone' THEN 2014 when '_aclitem' THEN 2003 when '_text' THEN 2003 when 'bytea' THEN -2 when 'oid' THEN -5 when 'name' THEN 12 when '_int4' THEN 2003 when '_int2' THEN 2003 when 'ARRAY' THEN 2003 when 'geometry' THEN 
-4 when 'super' THEN -16 else 1111 END as SMALLINT) AS SQL_DATA_TYPE, CAST(NULL AS SMALLINT) as SQL_DATETIME_SUB , case typname when 'int4' THEN 10 when 'bit' THEN 1 when 'bool' THEN 1 when 'varchar' THEN atttypmod -4 when 'character varying' THEN atttypmod -4 when 'char' THEN atttypmod -4 when 'character' THEN atttypmod -4 when 'nchar' THEN atttypmod -4 when 'bpchar' THEN atttypmod -4 when 'nvarchar' THEN atttypmod -4 when 'date' THEN 13 when 'timestamp' THEN 29 when 'smallint' THEN 5 when 'int2' THEN 5 when 'integer' THEN 10 when 'int' THEN 10 when 'int4' THEN 10 when 'bigint' THEN 19 when 'int8' THEN 19 when 'decimal' then ((atttypmod - 4) >> 16) & 65535 when 'real' THEN 8 when 'float4' THEN 8 when 'double precision' THEN 17 when 'float8' THEN 17 when 'float' THEN 17 when 'numeric' THEN ((atttypmod - 4) >> 16) & 65535 when '_float4' THEN 8 when 'timestamptz' THEN 35 when 'oid' THEN 10 when '_int4' THEN 10 when '_int2' THEN 5 when 'geometry' THEN NULL when 'super' THEN NULL else 2147483647 end as CHAR_OCTET_LENGTH , a.attnum AS ORDINAL_POSITION, case a.attnotnull OR (t.typtype = 'd' AND t.typnotnull) when 'false' then 'YES' when NULL then '' else 'NO' end AS IS_NULLABLE, null as SCOPE_CATALOG , null as SCOPE_SCHEMA , null as SCOPE_TABLE, t.typbasetype AS SOURCE_DATA_TYPE , CASE WHEN left(pg_catalog.pg_get_expr(def.adbin, def.adrelid), 16) = 'default_identity' THEN 'YES' ELSE 'NO' END AS IS_AUTOINCREMENT, IS_AUTOINCREMENT AS IS_GENERATEDCOLUMN FROM pg_catalog.pg_namespace n JOIN pg_catalog.pg_class c ON (c.relnamespace = n.oid) JOIN pg_catalog.pg_attribute a ON (a.attrelid=c.oid) JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum) LEFT JOIN pg_catalog.pg_description dsc ON (c.oid=dsc.objoid AND a.attnum = dsc.objsubid) LEFT JOIN pg_catalog.pg_class dc ON (dc.oid=dsc.classoid AND dc.relname='pg_class') LEFT JOIN pg_catalog.pg_namespace dn ON (dc.relnamespace=dn.oid AND dn.nspname='pg_catalog') WHERE a.attnum > 0 AND NOT a.attisdropped AND current_database() = 'cubedb' AND n.nspname LIKE 'public' AND c.relname LIKE 'KibanaSampleDataEcommerce' ORDER BY TABLE_SCHEM,c.relname,attnum ) UNION ALL SELECT current_database()::VARCHAR(128) AS TABLE_CAT, schemaname::varchar(128) AS table_schem, tablename::varchar(128) AS table_name, columnname::varchar(128) AS column_name, CAST(CASE columntype_rep WHEN 'text' THEN 12 WHEN 'bit' THEN -7 WHEN 'bool' THEN -7 WHEN 'boolean' THEN -7 WHEN 'varchar' THEN 12 WHEN 'character varying' THEN 12 WHEN 'char' THEN 1 WHEN 'character' THEN 1 WHEN 'nchar' THEN 1 WHEN 'bpchar' THEN 1 WHEN 'nvarchar' THEN 12 WHEN '\\\"char\\\"' THEN 1 WHEN 'date' THEN 91 WHEN 'timestamp' THEN 93 WHEN 'timestamp without time zone' THEN 93 WHEN 'timestamp with time zone' THEN 2014 WHEN 'smallint' THEN 5 WHEN 'int2' THEN 5 WHEN 'integer' THEN 4 WHEN 'int' THEN 4 WHEN 'int4' THEN 4 WHEN 'bigint' THEN -5 WHEN 'int8' THEN -5 WHEN 'decimal' THEN 3 WHEN 'real' THEN 7 WHEN 'float4' THEN 7 WHEN 'double precision' THEN 8 WHEN 'float8' THEN 8 WHEN 'float' THEN 6 WHEN 'numeric' THEN 2 WHEN 'timestamptz' THEN 2014 WHEN 'bytea' THEN -2 WHEN 'oid' THEN -5 WHEN 'name' THEN 12 WHEN 'ARRAY' THEN 2003 WHEN 'geometry' THEN -4 WHEN 'super' THEN -16 ELSE 1111 END AS SMALLINT) AS DATA_TYPE, COALESCE(NULL,CASE columntype WHEN 'boolean' THEN 'bool' WHEN 'character varying' THEN 'varchar' WHEN '\\\"char\\\"' THEN 'char' WHEN 'smallint' THEN 'int2' WHEN 'integer' THEN 'int4'WHEN 'bigint' THEN 'int8' WHEN 'real' THEN 'float4' 
WHEN 'double precision' THEN 'float8' WHEN 'timestamp without time zone' THEN 'timestamp' WHEN 'timestamp with time zone' THEN 'timestamptz' ELSE columntype END) AS TYPE_NAME, CASE columntype_rep WHEN 'int4' THEN 10 WHEN 'bit' THEN 1 WHEN 'bool' THEN 1WHEN 'boolean' THEN 1WHEN 'varchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'character varying' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'char' THEN regexp_substr (columntype,'[0-9]+',4)::INTEGER WHEN 'character' THEN regexp_substr (columntype,'[0-9]+',4)::INTEGER WHEN 'nchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'bpchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'nvarchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'date' THEN 13 WHEN 'timestamp' THEN 29 WHEN 'timestamp without time zone' THEN 29 WHEN 'smallint' THEN 5 WHEN 'int2' THEN 5 WHEN 'integer' THEN 10 WHEN 'int' THEN 10 WHEN 'int4' THEN 10 WHEN 'bigint' THEN 19 WHEN 'int8' THEN 19 WHEN 'decimal' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN 'real' THEN 8 WHEN 'float4' THEN 8 WHEN 'double precision' THEN 17 WHEN 'float8' THEN 17 WHEN 'float' THEN 17WHEN 'numeric' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN '_float4' THEN 8 WHEN 'timestamptz' THEN 35 WHEN 'timestamp with time zone' THEN 35 WHEN 'oid' THEN 10 WHEN '_int4' THEN 10 WHEN '_int2' THEN 5 WHEN 'geometry' THEN NULL WHEN 'super' THEN NULL ELSE 2147483647 END AS COLUMN_SIZE, NULL AS BUFFER_LENGTH, CASE columntype WHEN 'real' THEN 8 WHEN 'float4' THEN 8 WHEN 'double precision' THEN 17 WHEN 'float8' THEN 17 WHEN 'timestamp' THEN 6 WHEN 'timestamp without time zone' THEN 6 WHEN 'geometry' THEN NULL WHEN 'super' THEN NULL ELSE 0 END AS DECIMAL_DIGITS, 10 AS NUM_PREC_RADIX, NULL AS NULLABLE, NULL AS REMARKS, NULL AS COLUMN_DEF, CAST(CASE columntype_rep WHEN 'text' THEN 12 WHEN 'bit' THEN -7 WHEN 'bool' THEN -7 WHEN 'boolean' THEN -7 WHEN 'varchar' THEN 12 WHEN 'character varying' THEN 12 WHEN 'char' THEN 1 WHEN 'character' THEN 1 WHEN 'nchar' THEN 12 WHEN 'bpchar' THEN 1 WHEN 'nvarchar' THEN 12 WHEN '\\\"char\\\"' THEN 1 WHEN 'date' THEN 91 WHEN 'timestamp' THEN 93 WHEN 'timestamp without time zone' THEN 93 WHEN 'timestamp with time zone' THEN 2014 WHEN 'smallint' THEN 5 WHEN 'int2' THEN 5 WHEN 'integer' THEN 4 WHEN 'int' THEN 4 WHEN 'int4' THEN 4 WHEN 'bigint' THEN -5 WHEN 'int8' THEN -5 WHEN 'decimal' THEN 3 WHEN 'real' THEN 7 WHEN 'float4' THEN 7 WHEN 'double precision' THEN 8 WHEN 'float8' THEN 8 WHEN 'float' THEN 6 WHEN 'numeric' THEN 2 WHEN 'timestamptz' THEN 2014 WHEN 'bytea' THEN -2 WHEN 'oid' THEN -5 WHEN 'name' THEN 12 WHEN 'ARRAY' THEN 2003 WHEN 'geometry' THEN -4 WHEN 'super' THEN -4 ELSE 1111 END AS SMALLINT) AS SQL_DATA_TYPE, CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB, CASE WHEN LEFT (columntype,7) = 'varchar' THEN regexp_substr (columntype,'[0-9]+',7)::INTEGER WHEN LEFT (columntype,4) = 'char' THEN regexp_substr (columntype,'[0-9]+',4)::INTEGER WHEN columntype = 'string' THEN 16383 ELSE NULL END AS CHAR_OCTET_LENGTH, columnnum AS ORDINAL_POSITION, NULL AS IS_NULLABLE, NULL AS SCOPE_CATALOG, NULL AS SCOPE_SCHEMA, NULL AS SCOPE_TABLE, NULL AS SOURCE_DATA_TYPE, 'NO' AS IS_AUTOINCREMENT, 'NO' as IS_GENERATEDCOLUMN FROM (select lbv_cols.schemaname, lbv_cols.tablename, lbv_cols.columnname,REGEXP_REPLACE(REGEXP_REPLACE(lbv_cols.columntype,'\\\\\\\\(.*\\\\\\\\)'),'^_.+','ARRAY') as columntype_rep,columntype, lbv_cols.columnnum from pg_get_late_binding_view_cols() lbv_cols( schemaname name, tablename name, columnname name, 
columntype text, columnnum int)) lbv_columns WHERE true AND current_database() = 'cubedb' AND schemaname LIKE 'public' AND tablename LIKE 'KibanaSampleDataEcommerce';\".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+---------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
-| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | COLUMN_NAME | DATA_TYPE | TYPE_NAME | COLUMN_SIZE | BUFFER_LENGTH | DECIMAL_DIGITS | NUM_PREC_RADIX | NULLABLE | REMARKS | COLUMN_DEF | SQL_DATA_TYPE | SQL_DATETIME_SUB | CHAR_OCTET_LENGTH | ORDINAL_POSITION | IS_NULLABLE | SCOPE_CATALOG | SCOPE_SCHEMA | SCOPE_TABLE | SOURCE_DATA_TYPE | IS_AUTOINCREMENT | IS_GENERATEDCOLUMN |
-+-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+---------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
-| cubedb | public | KibanaSampleDataEcommerce | count | -5 | int8 | 19 | NULL | 0 | 10 | NULL | NULL | NULL | -5 | NULL | 19 | 1 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | maxPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 2 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | sumPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 3 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | minPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 4 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | avgPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 5 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | countDistinct | -5 | int8 | 19 | NULL | 0 | 10 | NULL | NULL | NULL | -5 | NULL | 19 | 6 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | order_date | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 7 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | last_mod | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 8 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | customer_gender | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 9 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | notes | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 10 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | taxful_total_price | 2 | numeric | -1 | NULL | 65531 | 10 | 1 | NULL | NULL | 2 | NULL | 65535 | 11 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | has_subscription | -7 | bool | 1 | NULL | 0 | 10 | 1 | NULL | NULL | -7 | NULL | 1 | 12 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | is_male | -7 | bool | 1 | NULL | 0 | 10 | NULL | NULL | NULL | -7 | NULL | 1 | 13 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | is_female | -7 | bool | 1 | NULL | 0 | 10 | NULL | NULL | NULL | -7 | NULL | 1 | 14 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | __user | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 15 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | __cubeJoinField | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 16 | YES | NULL | NULL | NULL | 0 | NO | false |
-+-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+---------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
++-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+-----------------------------------------------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
+| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | COLUMN_NAME | DATA_TYPE | TYPE_NAME | COLUMN_SIZE | BUFFER_LENGTH | DECIMAL_DIGITS | NUM_PREC_RADIX | NULLABLE | REMARKS | COLUMN_DEF | SQL_DATA_TYPE | SQL_DATETIME_SUB | CHAR_OCTET_LENGTH | ORDINAL_POSITION | IS_NULLABLE | SCOPE_CATALOG | SCOPE_SCHEMA | SCOPE_TABLE | SOURCE_DATA_TYPE | IS_AUTOINCREMENT | IS_GENERATEDCOLUMN |
++-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+-----------------------------------------------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
+| cubedb | public | KibanaSampleDataEcommerce | count | -5 | int8 | 19 | NULL | 0 | 10 | NULL | Events count | NULL | -5 | NULL | 19 | 1 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | maxPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 2 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | sumPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 3 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | minPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 4 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | avgPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 5 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | countDistinct | -5 | int8 | 19 | NULL | 0 | 10 | NULL | NULL | NULL | -5 | NULL | 19 | 6 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | order_date | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 7 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | last_mod | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 8 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | customer_gender | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | Customer gender | NULL | 12 | NULL | 2147483647 | 9 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | notes | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 10 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | taxful_total_price | 2 | numeric | -1 | NULL | 65531 | 10 | 1 | NULL | NULL | 2 | NULL | 65535 | 11 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | has_subscription | -7 | bool | 1 | NULL | 0 | 10 | 1 | NULL | NULL | -7 | NULL | 1 | 12 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | is_male | -7 | bool | 1 | NULL | 0 | 10 | NULL | Male users segment | NULL | -7 | NULL | 1 | 13 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | is_female | -7 | bool | 1 | NULL | 0 | 10 | NULL | NULL | NULL | -7 | NULL | 1 | 14 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | __user | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | Virtual column for security context switching | NULL | 12 | NULL | 2147483647 | 15 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | __cubeJoinField | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | Virtual column for joining cubes | NULL | 12 | NULL | 2147483647 | 16 | YES | NULL | NULL | NULL | 0 | NO | false |
++-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+-----------------------------------------------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
diff --git a/rust/cubesql/cubesql/src/compile/test/mod.rs b/rust/cubesql/cubesql/src/compile/test/mod.rs
index a4f468b24df9c..999960e3609ba 100644
--- a/rust/cubesql/cubesql/src/compile/test/mod.rs
+++ b/rust/cubesql/cubesql/src/compile/test/mod.rs
@@ -43,7 +43,7 @@ pub fn get_test_meta() -> Vec<CubeMeta> {
vec![
CubeMeta {
name: "KibanaSampleDataEcommerce".to_string(),
- description: None,
+ description: Some("Sample data for tracking eCommerce orders from Kibana".to_string()),
title: None,
dimensions: vec![
CubeMetaDimension {
@@ -58,7 +58,7 @@ pub fn get_test_meta() -> Vec {
},
CubeMetaDimension {
name: "KibanaSampleDataEcommerce.customer_gender".to_string(),
- description: None,
+ description: Some("Customer gender".to_string()),
_type: "string".to_string(),
},
CubeMetaDimension {
@@ -81,7 +81,7 @@ pub fn get_test_meta() -> Vec {
CubeMetaMeasure {
name: "KibanaSampleDataEcommerce.count".to_string(),
title: None,
- description: None,
+ description: Some("Events count".to_string()),
_type: "number".to_string(),
agg_type: Some("count".to_string()),
},
@@ -263,24 +263,24 @@ pub fn get_test_meta() -> Vec {
},
CubeMeta {
name: "MultiTypeCube".to_string(),
- description: None,
+ description: Some("Test cube with a little bit of everything".to_string()),
title: None,
dimensions: (0..10)
.flat_map(|i| {
[
CubeMetaDimension {
name: format!("MultiTypeCube.dim_num{}", i),
- description: None,
+ description: Some(format!("Test numeric dimension {i}")),
_type: "number".to_string(),
},
CubeMetaDimension {
name: format!("MultiTypeCube.dim_str{}", i),
- description: None,
+ description: Some(format!("Test string dimension {i}")),
_type: "string".to_string(),
},
CubeMetaDimension {
name: format!("MultiTypeCube.dim_date{}", i),
- description: None,
+ description: Some(format!("Test time dimension {i}")),
_type: "time".to_string(),
},
]
@@ -294,21 +294,21 @@ pub fn get_test_meta() -> Vec {
_type: "number".to_string(),
agg_type: Some("number".to_string()),
title: None,
- description: None,
+ description: Some(format!("Test number measure {i}")),
},
CubeMetaMeasure {
name: format!("MultiTypeCube.measure_str{}", i),
_type: "string".to_string(),
agg_type: Some("max".to_string()),
title: None,
- description: None,
+ description: Some(format!("Test max(string) measure {i}")),
},
CubeMetaMeasure {
name: format!("MultiTypeCube.measure_date{}", i),
_type: "time".to_string(),
agg_type: Some("max".to_string()),
title: None,
- description: None,
+ description: Some(format!("Test max(time) measure {i}")),
},
]
})
@@ -317,35 +317,35 @@ pub fn get_test_meta() -> Vec {
CubeMetaMeasure {
name: "MultiTypeCube.count".to_string(),
title: None,
- description: None,
+ description: Some("Test count measure".to_string()),
_type: "number".to_string(),
agg_type: Some("count".to_string()),
},
CubeMetaMeasure {
name: "MultiTypeCube.maxPrice".to_string(),
title: None,
- description: None,
+ description: Some("Test maxPrice measure".to_string()),
_type: "number".to_string(),
agg_type: Some("max".to_string()),
},
CubeMetaMeasure {
name: "MultiTypeCube.minPrice".to_string(),
title: None,
- description: None,
+ description: Some("Test minPrice measure".to_string()),
_type: "number".to_string(),
agg_type: Some("min".to_string()),
},
CubeMetaMeasure {
name: "MultiTypeCube.avgPrice".to_string(),
title: None,
- description: None,
+ description: Some("Test avgPrice measure".to_string()),
_type: "number".to_string(),
agg_type: Some("avg".to_string()),
},
CubeMetaMeasure {
name: "MultiTypeCube.countDistinct".to_string(),
title: None,
- description: None,
+ description: Some("Test countDistinct measure".to_string()),
_type: "number".to_string(),
agg_type: Some("countDistinct".to_string()),
},
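
For orientation, a minimal sketch of what these fixture changes amount to, using only the fields visible in the hunks above (the assertion is illustrative, not a test from this patch):

    // A measure fixture now carries a human-readable description, which the
    // introspection snapshots below surface as the comment/REMARKS column.
    let count = CubeMetaMeasure {
        name: "KibanaSampleDataEcommerce.count".to_string(),
        title: None,
        description: Some("Events count".to_string()),
        _type: "number".to_string(),
        agg_type: Some("count".to_string()),
    };
    assert_eq!(count.description.as_deref(), Some("Events count"));
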
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_large_select_query.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_large_select_query.snap
index 6bf1754107e6b..4d8a3a0f57060 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_large_select_query.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_large_select_query.snap
@@ -2,23 +2,23 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(\"\n SELECT\n na.nspname as Schema,\n cl.relname as Table,\n att.attname AS Name,\n att.attnum as Position,\n CASE\n WHEN att.attnotnull = 'f' THEN 'true'\n ELSE 'false'\n END as Nullable,\n CASE\n WHEN\n ty.typname Like 'bit' OR\n ty.typname Like 'varbit' and\n att.atttypmod > 0\n THEN att.atttypmod\n WHEN ty.typname Like 'interval' THEN -1\n WHEN att.atttypmod > 0 THEN att.atttypmod - 4\n ELSE att.atttypmod\n END as Length,\n (information_schema._pg_numeric_precision(information_schema._pg_truetypid(att.*, ty.*), information_schema._pg_truetypmod(att.*, ty.*)))::information_schema.cardinal_number AS Precision,\n (information_schema._pg_numeric_scale(information_schema._pg_truetypid(att.*, ty.*), information_schema._pg_truetypmod(att.*, ty.*)))::information_schema.cardinal_number AS Scale,\n (information_schema._pg_datetime_precision(information_schema._pg_truetypid(att.*, ty.*), information_schema._pg_truetypmod(att.*, ty.*)))::information_schema.cardinal_number AS DatetimeLength,\n CASE\n WHEN att.attnotnull = 'f' THEN 'false'\n ELSE 'true'\n END as IsUnique,\n att.atthasdef as HasDefaultValue,\n att.attisdropped as IsDropped,\n att.attinhcount as ancestorCount,\n att.attndims as Dimension,\n CASE\n WHEN attndims > 0 THEN true\n ELSE false\n END AS isarray,\n CASE\n WHEN ty.typname = 'bpchar' THEN 'char'\n WHEN ty.typname = '_bpchar' THEN '_char'\n ELSE ty.typname\n END as TypeName,\n tn.nspname as TypeSchema,\n et.typname as elementaltypename,\n description as Comment,\n cs.relname AS sername,\n ns.nspname AS serschema,\n att.attidentity as IdentityMode,\n CAST(pg_get_expr(def.adbin, def.adrelid) AS varchar) as DefaultValue,\n (SELECT count(1) FROM pg_type t2 WHERE t2.typname=ty.typname) > 1 AS isdup\n FROM pg_attribute att\n JOIN pg_type ty ON ty.oid=atttypid\n JOIN pg_namespace tn ON tn.oid=ty.typnamespace\n JOIN pg_class cl ON\n cl.oid=attrelid AND\n (\n (cl.relkind = 'r') OR\n (cl.relkind = 's') OR\n (cl.relkind = 'v') OR\n (cl.relkind = 'm') OR\n (cl.relkind = 'f')\n )\n JOIN pg_namespace na ON na.oid=cl.relnamespace\n LEFT OUTER JOIN pg_type et ON et.oid=ty.typelem\n LEFT OUTER JOIN pg_attrdef def ON\n adrelid=attrelid AND\n adnum=attnum\n LEFT OUTER JOIN pg_description des ON\n des.objoid=attrelid AND\n des.objsubid=attnum\n LEFT OUTER JOIN (\n pg_depend\n JOIN pg_class cs ON\n objid=cs.oid AND\n cs.relkind='S' AND\n classid='pg_class'::regclass::oid\n ) ON\n refobjid=attrelid AND\n refobjsubid=attnum\n LEFT OUTER JOIN pg_namespace ns ON ns.oid=cs.relnamespace\n WHERE\n attnum > 0 AND\n attisdropped IS FALSE AND\n cl.relname like E'KibanaSampleDataEcommerce' AND\n na.nspname like E'public' AND\n att.attname like '%'\n ORDER BY attnum\n ;\n \".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+--------+---------------------------+--------------------+----------+----------+--------+-----------+-------+----------------+----------+-----------------+-----------+---------------+-----------+---------+-----------+------------+-------------------+---------+---------+-----------+--------------+--------------+-------+
-| Schema | Table | Name | Position | Nullable | Length | Precision | Scale | DatetimeLength | IsUnique | HasDefaultValue | IsDropped | ancestorCount | Dimension | isarray | TypeName | TypeSchema | elementaltypename | Comment | sername | serschema | IdentityMode | DefaultValue | isdup |
-+--------+---------------------------+--------------------+----------+----------+--------+-----------+-------+----------------+----------+-----------------+-----------+---------------+-----------+---------+-----------+------------+-------------------+---------+---------+-----------+--------------+--------------+-------+
-| public | KibanaSampleDataEcommerce | count | 1 | false | -1 | 64 | 0 | NULL | true | false | false | 0 | 0 | false | int8 | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | maxPrice | 2 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | sumPrice | 3 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | minPrice | 4 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | avgPrice | 5 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | countDistinct | 6 | false | -1 | 64 | 0 | NULL | true | false | false | 0 | 0 | false | int8 | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | order_date | 7 | true | -1 | NULL | NULL | 6 | false | false | false | 0 | 0 | false | timestamp | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | last_mod | 8 | true | -1 | NULL | NULL | 6 | false | false | false | 0 | 0 | false | timestamp | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | customer_gender | 9 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | notes | 10 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | taxful_total_price | 11 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | has_subscription | 12 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | bool | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | is_male | 13 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | bool | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | is_female | 14 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | bool | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | __user | 15 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-| public | KibanaSampleDataEcommerce | __cubeJoinField | 16 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
-+--------+---------------------------+--------------------+----------+----------+--------+-----------+-------+----------------+----------+-----------------+-----------+---------------+-----------+---------+-----------+------------+-------------------+---------+---------+-----------+--------------+--------------+-------+
++--------+---------------------------+--------------------+----------+----------+--------+-----------+-------+----------------+----------+-----------------+-----------+---------------+-----------+---------+-----------+------------+-------------------+-----------------------------------------------+---------+-----------+--------------+--------------+-------+
+| Schema | Table | Name | Position | Nullable | Length | Precision | Scale | DatetimeLength | IsUnique | HasDefaultValue | IsDropped | ancestorCount | Dimension | isarray | TypeName | TypeSchema | elementaltypename | Comment | sername | serschema | IdentityMode | DefaultValue | isdup |
++--------+---------------------------+--------------------+----------+----------+--------+-----------+-------+----------------+----------+-----------------+-----------+---------------+-----------+---------+-----------+------------+-------------------+-----------------------------------------------+---------+-----------+--------------+--------------+-------+
+| public | KibanaSampleDataEcommerce | count | 1 | false | -1 | 64 | 0 | NULL | true | false | false | 0 | 0 | false | int8 | pg_catalog | NULL | Events count | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | maxPrice | 2 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | sumPrice | 3 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | minPrice | 4 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | avgPrice | 5 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | countDistinct | 6 | false | -1 | 64 | 0 | NULL | true | false | false | 0 | 0 | false | int8 | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | order_date | 7 | true | -1 | NULL | NULL | 6 | false | false | false | 0 | 0 | false | timestamp | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | last_mod | 8 | true | -1 | NULL | NULL | 6 | false | false | false | 0 | 0 | false | timestamp | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | customer_gender | 9 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | Customer gender | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | notes | 10 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | taxful_total_price | 11 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | numeric | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | has_subscription | 12 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | bool | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | is_male | 13 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | bool | pg_catalog | NULL | Male users segment | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | is_female | 14 | false | -1 | NULL | NULL | NULL | true | false | false | 0 | 0 | false | bool | pg_catalog | NULL | NULL | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | __user | 15 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | Virtual column for security context switching | NULL | NULL | | NULL | false |
+| public | KibanaSampleDataEcommerce | __cubeJoinField | 16 | true | -1 | NULL | NULL | NULL | false | false | false | 0 | 0 | false | text | pg_catalog | NULL | Virtual column for joining cubes | NULL | NULL | | NULL | false |
++--------+---------------------------+--------------------+----------+----------+--------+-----------+-------+----------------+----------+-----------------+-----------+---------------+-----------+---------+-----------+------------+-------------------+-----------------------------------------------+---------+-----------+--------------+--------------+-------+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_select_db_query.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_select_db_query.snap
index d54385dfdf071..6c3c50c674549 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_select_db_query.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__excel_select_db_query.snap
@@ -2,12 +2,12 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(\"\n SELECT\n 'db' as Database,\n ns.nspname as Schema,\n relname as Name,\n CASE\n WHEN ns.nspname Like E'pg\\\\_catalog' then 'Catalog'\n WHEN ns.nspname Like E'information\\\\_schema' then 'Information'\n WHEN relkind = 'f' then 'Foreign'\n ELSE 'User'\n END as TableType,\n pg_get_userbyid(relowner) AS definer,\n rel.oid as Oid,\n relacl as ACL,\n true as HasOids,\n relhassubclass as HasSubtables,\n reltuples as RowNumber,\n description as Comment,\n relnatts as ColumnNumber,\n relhastriggers as TriggersNumber,\n conname as Constraint,\n conkey as ColumnConstrainsIndexes\n FROM pg_class rel\n INNER JOIN pg_namespace ns ON relnamespace = ns.oid\n LEFT OUTER JOIN pg_description des ON\n des.objoid = rel.oid AND\n des.objsubid = 0\n LEFT OUTER JOIN pg_constraint c ON\n c.conrelid = rel.oid AND\n c.contype = 'p'\n WHERE\n (\n (relkind = 'r') OR\n (relkind = 's') OR\n (relkind = 'f')\n ) AND\n NOT ns.nspname LIKE E'pg\\\\_temp\\\\_%%' AND\n NOT ns.nspname like E'pg\\\\_%' AND\n NOT ns.nspname like E'information\\\\_schema' AND\n ns.nspname::varchar like E'public' AND\n relname::varchar like '%' AND\n pg_get_userbyid(relowner)::varchar like '%'\n ORDER BY relname\n ;\n \".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+----------+--------+---------------------------+-----------+---------+-------+------+---------+--------------+-----------+---------+--------------+----------------+------------+-------------------------+
-| Database | Schema | Name | TableType | definer | Oid | ACL | HasOids | HasSubtables | RowNumber | Comment | ColumnNumber | TriggersNumber | Constraint | ColumnConstrainsIndexes |
-+----------+--------+---------------------------+-----------+---------+-------+------+---------+--------------+-----------+---------+--------------+----------------+------------+-------------------------+
-| db | public | KibanaSampleDataEcommerce | User | ovr | 18000 | NULL | true | false | -1 | NULL | 16 | false | NULL | NULL |
-| db | public | Logs | User | ovr | 18019 | NULL | true | false | -1 | NULL | 7 | false | NULL | NULL |
-| db | public | MultiTypeCube | User | ovr | 18245 | NULL | true | false | -1 | NULL | 67 | false | NULL | NULL |
-| db | public | NumberCube | User | ovr | 18029 | NULL | true | false | -1 | NULL | 3 | false | NULL | NULL |
-| db | public | WideCube | User | ovr | 18035 | NULL | true | false | -1 | NULL | 207 | false | NULL | NULL |
-+----------+--------+---------------------------+-----------+---------+-------+------+---------+--------------+-----------+---------+--------------+----------------+------------+-------------------------+
++----------+--------+---------------------------+-----------+---------+-------+------+---------+--------------+-----------+-------------------------------------------------------+--------------+----------------+------------+-------------------------+
+| Database | Schema | Name | TableType | definer | Oid | ACL | HasOids | HasSubtables | RowNumber | Comment | ColumnNumber | TriggersNumber | Constraint | ColumnConstrainsIndexes |
++----------+--------+---------------------------+-----------+---------+-------+------+---------+--------------+-----------+-------------------------------------------------------+--------------+----------------+------------+-------------------------+
+| db | public | KibanaSampleDataEcommerce | User | ovr | 18000 | NULL | true | false | -1 | Sample data for tracking eCommerce orders from Kibana | 16 | false | NULL | NULL |
+| db | public | Logs | User | ovr | 18019 | NULL | true | false | -1 | NULL | 7 | false | NULL | NULL |
+| db | public | MultiTypeCube | User | ovr | 18245 | NULL | true | false | -1 | Test cube with a little bit of everything | 67 | false | NULL | NULL |
+| db | public | NumberCube | User | ovr | 18029 | NULL | true | false | -1 | NULL | 3 | false | NULL | NULL |
+| db | public | WideCube | User | ovr | 18035 | NULL | true | false | -1 | NULL | 207 | false | NULL | NULL |
++----------+--------+---------------------------+-----------+---------+-------+------+---------+--------------+-----------+-------------------------------------------------------+--------------+----------------+------------+-------------------------+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sigma_computing_with_subquery_query.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sigma_computing_with_subquery_query.snap
index dec355a0046fb..57c2822c9acdb 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sigma_computing_with_subquery_query.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sigma_computing_with_subquery_query.snap
@@ -2,23 +2,23 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(\"\n with\n nsp as (\n select oid\n from pg_catalog.pg_namespace\n where nspname = 'public'\n ),\n tbl as (\n select oid\n from pg_catalog.pg_class\n where\n relname = 'KibanaSampleDataEcommerce' and\n relnamespace = (select oid from nsp)\n )\n select\n attname,\n typname,\n description\n from pg_attribute a\n join pg_type on atttypid = pg_type.oid\n left join pg_description on\n attrelid = objoid and\n attnum = objsubid\n where\n attnum > 0 and\n attrelid = (select oid from tbl)\n order by attnum\n ;\n \".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+--------------------+-----------+-------------+
-| attname | typname | description |
-+--------------------+-----------+-------------+
-| count | int8 | NULL |
-| maxPrice | numeric | NULL |
-| sumPrice | numeric | NULL |
-| minPrice | numeric | NULL |
-| avgPrice | numeric | NULL |
-| countDistinct | int8 | NULL |
-| order_date | timestamp | NULL |
-| last_mod | timestamp | NULL |
-| customer_gender | text | NULL |
-| notes | text | NULL |
-| taxful_total_price | numeric | NULL |
-| has_subscription | bool | NULL |
-| is_male | bool | NULL |
-| is_female | bool | NULL |
-| __user | text | NULL |
-| __cubeJoinField | text | NULL |
-+--------------------+-----------+-------------+
++--------------------+-----------+-----------------------------------------------+
+| attname | typname | description |
++--------------------+-----------+-----------------------------------------------+
+| count | int8 | Events count |
+| maxPrice | numeric | NULL |
+| sumPrice | numeric | NULL |
+| minPrice | numeric | NULL |
+| avgPrice | numeric | NULL |
+| countDistinct | int8 | NULL |
+| order_date | timestamp | NULL |
+| last_mod | timestamp | NULL |
+| customer_gender | text | Customer gender |
+| notes | text | NULL |
+| taxful_total_price | numeric | NULL |
+| has_subscription | bool | NULL |
+| is_male | bool | Male users segment |
+| is_female | bool | NULL |
+| __user | text | Virtual column for security context switching |
+| __cubeJoinField | text | Virtual column for joining cubes |
++--------------------+-----------+-----------------------------------------------+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sqlalchemy_new_conname_query.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sqlalchemy_new_conname_query.snap
index 460faff0ba4a2..39066ea680e91 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sqlalchemy_new_conname_query.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__sqlalchemy_new_conname_query.snap
@@ -2,23 +2,23 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(r#\"SELECT\n a.attname,\n pg_catalog.format_type(a.atttypid, a.atttypmod),\n (\n SELECT\n pg_catalog.pg_get_expr(d.adbin, d.adrelid)\n FROM\n pg_catalog.pg_attrdef AS d\n WHERE\n d.adrelid = a.attrelid\n AND d.adnum = a.attnum\n AND a.atthasdef\n ) AS DEFAULT,\n a.attnotnull,\n a.attrelid AS table_oid,\n pgd.description AS comment,\n a.attgenerated AS generated,\n (\n SELECT\n json_build_object(\n 'always',\n a.attidentity = 'a',\n 'start',\n s.seqstart,\n 'increment',\n s.seqincrement,\n 'minvalue',\n s.seqmin,\n 'maxvalue',\n s.seqmax,\n 'cache',\n s.seqcache,\n 'cycle',\n s.seqcycle\n )\n FROM\n pg_catalog.pg_sequence AS s\n JOIN pg_catalog.pg_class AS c ON s.seqrelid = c.\"oid\"\n WHERE\n c.relkind = 'S'\n AND a.attidentity <> ''\n AND s.seqrelid = CAST(\n pg_catalog.pg_get_serial_sequence(\n CAST(CAST(a.attrelid AS REGCLASS) AS TEXT),\n a.attname\n ) AS REGCLASS\n )\n ) AS identity_options\n FROM\n pg_catalog.pg_attribute AS a\n LEFT JOIN pg_catalog.pg_description AS pgd ON (\n pgd.objoid = a.attrelid\n AND pgd.objsubid = a.attnum\n )\n WHERE\n a.attrelid = 18000\n AND a.attnum > 0\n AND NOT a.attisdropped\n ORDER BY\n a.attnum\"#.to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+--------------------+-------------------------------------+---------+------------+-----------+---------+-----------+------------------+
-| attname | format_type(a.atttypid,a.atttypmod) | DEFAULT | attnotnull | table_oid | comment | generated | identity_options |
-+--------------------+-------------------------------------+---------+------------+-----------+---------+-----------+------------------+
-| count | bigint | NULL | true | 18000 | NULL | | NULL |
-| maxPrice | numeric | NULL | true | 18000 | NULL | | NULL |
-| sumPrice | numeric | NULL | true | 18000 | NULL | | NULL |
-| minPrice | numeric | NULL | true | 18000 | NULL | | NULL |
-| avgPrice | numeric | NULL | true | 18000 | NULL | | NULL |
-| countDistinct | bigint | NULL | true | 18000 | NULL | | NULL |
-| order_date | timestamp without time zone | NULL | false | 18000 | NULL | | NULL |
-| last_mod | timestamp without time zone | NULL | false | 18000 | NULL | | NULL |
-| customer_gender | text | NULL | false | 18000 | NULL | | NULL |
-| notes | text | NULL | false | 18000 | NULL | | NULL |
-| taxful_total_price | numeric | NULL | false | 18000 | NULL | | NULL |
-| has_subscription | boolean | NULL | false | 18000 | NULL | | NULL |
-| is_male | boolean | NULL | true | 18000 | NULL | | NULL |
-| is_female | boolean | NULL | true | 18000 | NULL | | NULL |
-| __user | text | NULL | false | 18000 | NULL | | NULL |
-| __cubeJoinField | text | NULL | false | 18000 | NULL | | NULL |
-+--------------------+-------------------------------------+---------+------------+-----------+---------+-----------+------------------+
++--------------------+-------------------------------------+---------+------------+-----------+-----------------------------------------------+-----------+------------------+
+| attname | format_type(a.atttypid,a.atttypmod) | DEFAULT | attnotnull | table_oid | comment | generated | identity_options |
++--------------------+-------------------------------------+---------+------------+-----------+-----------------------------------------------+-----------+------------------+
+| count | bigint | NULL | true | 18000 | Events count | | NULL |
+| maxPrice | numeric | NULL | true | 18000 | NULL | | NULL |
+| sumPrice | numeric | NULL | true | 18000 | NULL | | NULL |
+| minPrice | numeric | NULL | true | 18000 | NULL | | NULL |
+| avgPrice | numeric | NULL | true | 18000 | NULL | | NULL |
+| countDistinct | bigint | NULL | true | 18000 | NULL | | NULL |
+| order_date | timestamp without time zone | NULL | false | 18000 | NULL | | NULL |
+| last_mod | timestamp without time zone | NULL | false | 18000 | NULL | | NULL |
+| customer_gender | text | NULL | false | 18000 | Customer gender | | NULL |
+| notes | text | NULL | false | 18000 | NULL | | NULL |
+| taxful_total_price | numeric | NULL | false | 18000 | NULL | | NULL |
+| has_subscription | boolean | NULL | false | 18000 | NULL | | NULL |
+| is_male | boolean | NULL | true | 18000 | Male users segment | | NULL |
+| is_female | boolean | NULL | true | 18000 | NULL | | NULL |
+| __user | text | NULL | false | 18000 | Virtual column for security context switching | | NULL |
+| __cubeJoinField | text | NULL | false | 18000 | Virtual column for joining cubes | | NULL |
++--------------------+-------------------------------------+---------+------------+-----------+-----------------------------------------------+-----------+------------------+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__superset_subquery.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__superset_subquery.snap
index 5808a86aacb4e..948c96230dcc4 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__superset_subquery.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__superset_subquery.snap
@@ -2,23 +2,23 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(\"\n SELECT\n a.attname,\n pg_catalog.format_type(a.atttypid, a.atttypmod),\n (\n SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)\n FROM pg_catalog.pg_attrdef d\n WHERE\n d.adrelid = a.attrelid AND\n d.adnum = a.attnum AND\n a.atthasdef\n ) AS DEFAULT,\n a.attnotnull,\n a.attnum,\n a.attrelid as table_oid,\n pgd.description as comment,\n a.attgenerated as generated\n FROM pg_catalog.pg_attribute a\n LEFT JOIN pg_catalog.pg_description pgd ON (\n pgd.objoid = a.attrelid AND\n pgd.objsubid = a.attnum\n )\n WHERE\n a.attrelid = 18000\n AND a.attnum > 0\n AND NOT a.attisdropped\n ORDER BY a.attnum\n ;\n \".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+--------------------+-------------------------------------+---------+------------+--------+-----------+---------+-----------+
-| attname | format_type(a.atttypid,a.atttypmod) | DEFAULT | attnotnull | attnum | table_oid | comment | generated |
-+--------------------+-------------------------------------+---------+------------+--------+-----------+---------+-----------+
-| count | bigint | NULL | true | 1 | 18000 | NULL | |
-| maxPrice | numeric | NULL | true | 2 | 18000 | NULL | |
-| sumPrice | numeric | NULL | true | 3 | 18000 | NULL | |
-| minPrice | numeric | NULL | true | 4 | 18000 | NULL | |
-| avgPrice | numeric | NULL | true | 5 | 18000 | NULL | |
-| countDistinct | bigint | NULL | true | 6 | 18000 | NULL | |
-| order_date | timestamp without time zone | NULL | false | 7 | 18000 | NULL | |
-| last_mod | timestamp without time zone | NULL | false | 8 | 18000 | NULL | |
-| customer_gender | text | NULL | false | 9 | 18000 | NULL | |
-| notes | text | NULL | false | 10 | 18000 | NULL | |
-| taxful_total_price | numeric | NULL | false | 11 | 18000 | NULL | |
-| has_subscription | boolean | NULL | false | 12 | 18000 | NULL | |
-| is_male | boolean | NULL | true | 13 | 18000 | NULL | |
-| is_female | boolean | NULL | true | 14 | 18000 | NULL | |
-| __user | text | NULL | false | 15 | 18000 | NULL | |
-| __cubeJoinField | text | NULL | false | 16 | 18000 | NULL | |
-+--------------------+-------------------------------------+---------+------------+--------+-----------+---------+-----------+
++--------------------+-------------------------------------+---------+------------+--------+-----------+-----------------------------------------------+-----------+
+| attname | format_type(a.atttypid,a.atttypmod) | DEFAULT | attnotnull | attnum | table_oid | comment | generated |
++--------------------+-------------------------------------+---------+------------+--------+-----------+-----------------------------------------------+-----------+
+| count | bigint | NULL | true | 1 | 18000 | Events count | |
+| maxPrice | numeric | NULL | true | 2 | 18000 | NULL | |
+| sumPrice | numeric | NULL | true | 3 | 18000 | NULL | |
+| minPrice | numeric | NULL | true | 4 | 18000 | NULL | |
+| avgPrice | numeric | NULL | true | 5 | 18000 | NULL | |
+| countDistinct | bigint | NULL | true | 6 | 18000 | NULL | |
+| order_date | timestamp without time zone | NULL | false | 7 | 18000 | NULL | |
+| last_mod | timestamp without time zone | NULL | false | 8 | 18000 | NULL | |
+| customer_gender | text | NULL | false | 9 | 18000 | Customer gender | |
+| notes | text | NULL | false | 10 | 18000 | NULL | |
+| taxful_total_price | numeric | NULL | false | 11 | 18000 | NULL | |
+| has_subscription | boolean | NULL | false | 12 | 18000 | NULL | |
+| is_male | boolean | NULL | true | 13 | 18000 | Male users segment | |
+| is_female | boolean | NULL | true | 14 | 18000 | NULL | |
+| __user | text | NULL | false | 15 | 18000 | Virtual column for security context switching | |
+| __cubeJoinField | text | NULL | false | 16 | 18000 | Virtual column for joining cubes | |
++--------------------+-------------------------------------+---------+------------+--------+-----------+-----------------------------------------------+-----------+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__tableau_regclass_query.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__tableau_regclass_query.snap
index af41cdf149bf3..fd6a8ec013151 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__tableau_regclass_query.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__tableau_regclass_query.snap
@@ -2,12 +2,12 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(\"SELECT NULL AS TABLE_CAT,\n n.nspname AS TABLE_SCHEM,\n c.relname AS TABLE_NAME,\n CASE n.nspname ~ '^pg_'\n OR n.nspname = 'information_schema'\n WHEN true THEN\n CASE\n WHEN n.nspname = 'pg_catalog'\n OR n.nspname = 'information_schema' THEN\n CASE c.relkind\n WHEN 'r' THEN 'SYSTEM TABLE'\n WHEN 'v' THEN 'SYSTEM VIEW'\n WHEN 'i' THEN 'SYSTEM INDEX'\n ELSE NULL\n end\n WHEN n.nspname = 'pg_toast' THEN\n CASE c.relkind\n WHEN 'r' THEN 'SYSTEM TOAST TABLE'\n WHEN 'i' THEN 'SYSTEM TOAST INDEX'\n ELSE NULL\n end\n ELSE\n CASE c.relkind\n WHEN 'r' THEN 'TEMPORARY TABLE'\n WHEN 'p' THEN 'TEMPORARY TABLE'\n WHEN 'i' THEN 'TEMPORARY INDEX'\n WHEN 'S' THEN 'TEMPORARY SEQUENCE'\n WHEN 'v' THEN 'TEMPORARY VIEW'\n ELSE NULL\n end\n end\n WHEN false THEN\n CASE c.relkind\n WHEN 'r' THEN 'TABLE'\n WHEN 'p' THEN 'PARTITIONED TABLE'\n WHEN 'i' THEN 'INDEX'\n WHEN 'P' THEN 'PARTITIONED INDEX'\n WHEN 'S' THEN 'SEQUENCE'\n WHEN 'v' THEN 'VIEW'\n WHEN 'c' THEN 'TYPE'\n WHEN 'f' THEN 'FOREIGN TABLE'\n WHEN 'm' THEN 'MATERIALIZED VIEW'\n ELSE NULL\n end\n ELSE NULL\n end AS TABLE_TYPE,\n d.description AS REMARKS,\n '' AS TYPE_CAT,\n '' AS TYPE_SCHEM,\n '' AS TYPE_NAME,\n '' AS SELF_REFERENCING_COL_NAME,\n '' AS REF_GENERATION\n FROM pg_catalog.pg_namespace n,\n pg_catalog.pg_class c\n LEFT JOIN pg_catalog.pg_description d\n ON ( c.oid = d.objoid\n AND d.objsubid = 0\n AND d.classoid = 'pg_class' :: regclass )\n WHERE c.relnamespace = n.oid\n AND ( false\n OR ( c.relkind = 'f' )\n OR ( c.relkind = 'm' )\n OR ( c.relkind = 'p'\n AND n.nspname !~ '^pg_'\n AND n.nspname <> 'information_schema' )\n OR ( c.relkind = 'r'\n AND n.nspname !~ '^pg_'\n AND n.nspname <> 'information_schema' )\n OR ( c.relkind = 'v'\n AND n.nspname <> 'pg_catalog'\n AND n.nspname <> 'information_schema' ) )\n ORDER BY TABLE_SCHEM ASC, TABLE_NAME ASC\n ;\".to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+-----------+-------------+---------------------------+------------+---------+----------+------------+-----------+---------------------------+----------------+
-| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | TABLE_TYPE | REMARKS | TYPE_CAT | TYPE_SCHEM | TYPE_NAME | SELF_REFERENCING_COL_NAME | REF_GENERATION |
-+-----------+-------------+---------------------------+------------+---------+----------+------------+-----------+---------------------------+----------------+
-| NULL | public | KibanaSampleDataEcommerce | TABLE | NULL | | | | | |
-| NULL | public | Logs | TABLE | NULL | | | | | |
-| NULL | public | MultiTypeCube | TABLE | NULL | | | | | |
-| NULL | public | NumberCube | TABLE | NULL | | | | | |
-| NULL | public | WideCube | TABLE | NULL | | | | | |
-+-----------+-------------+---------------------------+------------+---------+----------+------------+-----------+---------------------------+----------------+
++-----------+-------------+---------------------------+------------+-------------------------------------------------------+----------+------------+-----------+---------------------------+----------------+
+| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | TABLE_TYPE | REMARKS | TYPE_CAT | TYPE_SCHEM | TYPE_NAME | SELF_REFERENCING_COL_NAME | REF_GENERATION |
++-----------+-------------+---------------------------+------------+-------------------------------------------------------+----------+------------+-----------+---------------------------+----------------+
+| NULL | public | KibanaSampleDataEcommerce | TABLE | Sample data for tracking eCommerce orders from Kibana | | | | | |
+| NULL | public | Logs | TABLE | NULL | | | | | |
+| NULL | public | MultiTypeCube | TABLE | Test cube with a little bit of everything | | | | | |
+| NULL | public | NumberCube | TABLE | NULL | | | | | |
+| NULL | public | WideCube | TABLE | NULL | | | | | |
++-----------+-------------+---------------------------+------------+-------------------------------------------------------+----------+------------+-----------+---------------------------+----------------+
diff --git a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__thoughtspot_table_introspection.snap b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__thoughtspot_table_introspection.snap
index 590bbb9bf4a54..631471621aeb1 100644
--- a/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__thoughtspot_table_introspection.snap
+++ b/rust/cubesql/cubesql/src/compile/test/snapshots/cubesql__compile__test__test_introspection__thoughtspot_table_introspection.snap
@@ -2,23 +2,23 @@
source: cubesql/src/compile/test/test_introspection.rs
expression: "execute_query(r#\"\n SELECT *\n FROM (\n SELECT\n current_database() AS TABLE_CAT,\n n.nspname AS TABLE_SCHEM,\n c.relname AS TABLE_NAME,\n a.attname AS COLUMN_NAME,\n CAST(\n CASE typname\n WHEN 'text' THEN 12\n WHEN 'bit' THEN - 7\n WHEN 'bool' THEN - 7\n WHEN 'boolean' THEN - 7\n WHEN 'varchar' THEN 12\n WHEN 'character varying' THEN 12\n WHEN 'char' THEN 1\n WHEN '\"char\"' THEN 1\n WHEN 'character' THEN 1\n WHEN 'nchar' THEN 12\n WHEN 'bpchar' THEN 1\n WHEN 'nvarchar' THEN 12\n WHEN 'date' THEN 91\n WHEN 'time' THEN 92\n WHEN 'time without time zone' THEN 92\n WHEN 'timetz' THEN 2013\n WHEN 'time with time zone' THEN 2013\n WHEN 'timestamp' THEN 93\n WHEN 'timestamp without time zone' THEN 93\n WHEN 'timestamptz' THEN 2014\n WHEN 'timestamp with time zone' THEN 2014\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n WHEN 'integer' THEN 4\n WHEN 'int' THEN 4\n WHEN 'int4' THEN 4\n WHEN 'bigint' THEN - 5\n WHEN 'int8' THEN - 5\n WHEN 'decimal' THEN 3\n WHEN 'real' THEN 7\n WHEN 'float4' THEN 7\n WHEN 'double precision' THEN 8\n WHEN 'float8' THEN 8\n WHEN 'float' THEN 6\n WHEN 'numeric' THEN 2\n WHEN '_float4' THEN 2003\n WHEN '_aclitem' THEN 2003\n WHEN '_text' THEN 2003\n WHEN 'bytea' THEN - 2\n WHEN 'oid' THEN - 5\n WHEN 'name' THEN 12\n WHEN '_int4' THEN 2003\n WHEN '_int2' THEN 2003\n WHEN 'ARRAY' THEN 2003\n WHEN 'geometry' THEN - 4\n WHEN 'super' THEN - 16\n WHEN 'varbyte' THEN - 4\n WHEN 'geography' THEN - 4\n ELSE 1111\n END\n AS SMALLINT\n ) AS DATA_TYPE,\n t.typname AS TYPE_NAME,\n CASE typname\n WHEN 'int4' THEN 10\n WHEN 'bit' THEN 1\n WHEN 'bool' THEN 1\n WHEN 'varchar' THEN atttypmod - 4\n WHEN 'character varying' THEN atttypmod - 4\n WHEN 'char' THEN atttypmod - 4\n WHEN 'character' THEN atttypmod - 4\n WHEN 'nchar' THEN atttypmod - 4\n WHEN 'bpchar' THEN atttypmod - 4\n WHEN 'nvarchar' THEN atttypmod - 4\n WHEN 'date' THEN 13\n WHEN 'time' THEN 15\n WHEN 'time without time zone' THEN 15\n WHEN 'timetz' THEN 21\n WHEN 'time with time zone' THEN 21\n WHEN 'timestamp' THEN 29\n WHEN 'timestamp without time zone' THEN 29\n WHEN 'timestamptz' THEN 35\n WHEN 'timestamp with time zone' THEN 35\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n WHEN 'integer' THEN 10\n WHEN 'int' THEN 10\n WHEN 'int4' THEN 10\n WHEN 'bigint' THEN 19\n WHEN 'int8' THEN 19\n WHEN 'decimal' THEN (atttypmod - 4) >> 16\n WHEN 'real' THEN 8\n WHEN 'float4' THEN 8\n WHEN 'double precision' THEN 17\n WHEN 'float8' THEN 17\n WHEN 'float' THEN 17\n WHEN 'numeric' THEN (atttypmod - 4) >> 16\n WHEN '_float4' THEN 8\n WHEN 'oid' THEN 10\n WHEN '_int4' THEN 10\n WHEN '_int2' THEN 5\n WHEN 'geometry' THEN NULL\n WHEN 'super' THEN NULL\n WHEN 'varbyte' THEN NULL\n WHEN 'geography' THEN NULL\n ELSE 2147483647\n END AS COLUMN_SIZE,\n NULL AS BUFFER_LENGTH,\n CASE typname\n WHEN 'float4' THEN 8\n WHEN 'float8' THEN 17\n WHEN 'numeric' THEN (atttypmod - 4) & 65535\n WHEN 'time without time zone' THEN 6\n WHEN 'timetz' THEN 6\n WHEN 'time with time zone' THEN 6\n WHEN 'timestamp without time zone' THEN 6\n WHEN 'timestamp' THEN 6\n WHEN 'geometry' THEN NULL\n WHEN 'super' THEN NULL\n WHEN 'varbyte' THEN NULL\n WHEN 'geography' THEN NULL\n ELSE 0\n END AS DECIMAL_DIGITS,\n CASE typname\n WHEN 'varbyte' THEN 2\n WHEN 'geography' THEN 2\n ELSE 10\n END AS NUM_PREC_RADIX,\n CASE a.attnotnull OR (t.typtype = 'd' AND t.typnotnull)\n WHEN 'false' THEN 1\n WHEN NULL THEN 2\n ELSE 0\n END AS NULLABLE,\n dsc.description AS REMARKS,\n pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS COLUMN_DEF,\n CAST(\n CASE 
typname\n WHEN 'text' THEN 12\n WHEN 'bit' THEN - 7\n WHEN 'bool' THEN - 7\n WHEN 'boolean' THEN - 7\n WHEN 'varchar' THEN 12\n WHEN 'character varying' THEN 12\n WHEN '\"char\"' THEN 1\n WHEN 'char' THEN 1\n WHEN 'character' THEN 1\n WHEN 'nchar' THEN 1\n WHEN 'bpchar' THEN 1\n WHEN 'nvarchar' THEN 12\n WHEN 'date' THEN 91\n WHEN 'time' THEN 92\n WHEN 'time without time zone' THEN 92\n WHEN 'timetz' THEN 2013\n WHEN 'time with time zone' THEN 2013\n WHEN 'timestamp with time zone' THEN 2014\n WHEN 'timestamp' THEN 93\n WHEN 'timestamp without time zone' THEN 93\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n WHEN 'integer' THEN 4\n WHEN 'int' THEN 4\n WHEN 'int4' THEN 4\n WHEN 'bigint' THEN - 5\n WHEN 'int8' THEN - 5\n WHEN 'decimal' THEN 3\n WHEN 'real' THEN 7\n WHEN 'float4' THEN 7\n WHEN 'double precision' THEN 8\n WHEN 'float8' THEN 8\n WHEN 'float' THEN 6\n WHEN 'numeric' THEN 2\n WHEN '_float4' THEN 2003\n WHEN 'timestamptz' THEN 2014\n WHEN 'timestamp with time zone' THEN 2014\n WHEN '_aclitem' THEN 2003\n WHEN '_text' THEN 2003\n WHEN 'bytea' THEN - 2\n WHEN 'oid' THEN - 5\n WHEN 'name' THEN 12\n WHEN '_int4' THEN 2003\n WHEN '_int2' THEN 2003\n WHEN 'ARRAY' THEN 2003\n WHEN 'geometry' THEN - 4\n WHEN 'super' THEN - 16\n WHEN 'varbyte' THEN - 4\n WHEN 'geography' THEN - 4 ELSE 1111\n END\n AS SMALLINT\n ) AS SQL_DATA_TYPE,\n CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB,\n CASE typname\n WHEN 'int4' THEN 10\n WHEN 'bit' THEN 1\n WHEN 'bool' THEN 1\n WHEN 'varchar' THEN atttypmod - 4\n WHEN 'character varying' THEN atttypmod - 4\n WHEN 'char' THEN atttypmod - 4\n WHEN 'character' THEN atttypmod - 4\n WHEN 'nchar' THEN atttypmod - 4\n WHEN 'bpchar' THEN atttypmod - 4\n WHEN 'nvarchar' THEN atttypmod - 4\n WHEN 'date' THEN 13\n WHEN 'time' THEN 15\n WHEN 'time without time zone' THEN 15\n WHEN 'timetz' THEN 21\n WHEN 'time with time zone' THEN 21\n WHEN 'timestamp' THEN 29\n WHEN 'timestamp without time zone' THEN 29\n WHEN 'timestamptz' THEN 35\n WHEN 'timestamp with time zone' THEN 35\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n WHEN 'integer' THEN 10\n WHEN 'int' THEN 10\n WHEN 'int4' THEN 10\n WHEN 'bigint' THEN 19\n WHEN 'int8' THEN 19\n WHEN 'decimal' THEN ((atttypmod - 4) >> 16) & 65535\n WHEN 'real' THEN 8\n WHEN 'float4' THEN 8\n WHEN 'double precision' THEN 17\n WHEN 'float8' THEN 17\n WHEN 'float' THEN 17\n WHEN 'numeric' THEN ((atttypmod - 4) >> 16) & 65535\n WHEN '_float4' THEN 8\n WHEN 'oid' THEN 10\n WHEN '_int4' THEN 10\n WHEN '_int2' THEN 5\n WHEN 'geometry' THEN NULL\n WHEN 'super' THEN NULL\n WHEN 'varbyte' THEN NULL\n WHEN 'geography' THEN NULL\n ELSE 2147483647\n END AS CHAR_OCTET_LENGTH,\n a.attnum AS ORDINAL_POSITION,\n CASE a.attnotnull OR (t.typtype = 'd' AND t.typnotnull)\n WHEN 'false' THEN 'YES'\n WHEN NULL THEN ''\n ELSE 'NO'\n END AS IS_NULLABLE,\n NULL AS SCOPE_CATALOG,\n NULL AS SCOPE_SCHEMA,\n NULL AS SCOPE_TABLE,\n t.typbasetype AS SOURCE_DATA_TYPE,\n CASE\n WHEN left(pg_catalog.pg_get_expr(def.adbin, def.adrelid), 16) = 'default_identity' THEN 'YES'\n ELSE 'NO'\n END AS IS_AUTOINCREMENT,\n false AS IS_GENERATEDCOLUMN\n FROM pg_catalog.pg_namespace AS n\n JOIN pg_catalog.pg_class AS c ON (c.relnamespace = n.oid)\n JOIN pg_catalog.pg_attribute AS a ON (a.attrelid = c.oid)\n JOIN pg_catalog.pg_type AS t ON (a.atttypid = t.oid)\n LEFT JOIN pg_catalog.pg_attrdef AS def ON (a.attrelid = def.adrelid AND a.attnum = def.adnum)\n LEFT JOIN pg_catalog.pg_description AS dsc ON (c.oid = dsc.objoid AND a.attnum = dsc.objsubid)\n LEFT JOIN pg_catalog.pg_class AS 
dc ON (dc.oid = dsc.classoid AND dc.relname = 'pg_class')\n LEFT JOIN pg_catalog.pg_namespace AS dn ON (dc.relnamespace = dn.oid AND dn.nspname = 'pg_catalog')\n WHERE\n a.attnum > 0 AND\n NOT a.attisdropped AND\n current_database() = 'cubedb' AND\n n.nspname LIKE 'public' AND\n c.relname LIKE 'KibanaSampleDataEcommerce'\n ORDER BY\n TABLE_SCHEM,\n c.relname,\n attnum\n ) AS t\n UNION ALL\n SELECT\n CAST(current_database() AS CHARACTER VARYING(128)) AS TABLE_CAT,\n CAST(schemaname AS CHARACTER VARYING(128)) AS table_schem,\n CAST(tablename AS CHARACTER VARYING(128)) AS table_name,\n CAST(columnname AS CHARACTER VARYING(128)) AS column_name,\n CAST(\n CASE columntype_rep\n WHEN 'text' THEN 12\n WHEN 'bit' THEN - 7\n WHEN 'bool' THEN - 7\n WHEN 'boolean' THEN - 7\n WHEN 'varchar' THEN 12\n WHEN 'character varying' THEN 12\n WHEN 'char' THEN 1\n WHEN 'character' THEN 1\n WHEN 'nchar' THEN 1\n WHEN 'bpchar' THEN 1\n WHEN 'nvarchar' THEN 12\n WHEN '\"char\"' THEN 1\n WHEN 'date' THEN 91\n WHEN 'time' THEN 92\n WHEN 'time without time zone' THEN 92\n WHEN 'timetz' THEN 2013\n WHEN 'time with time zone' THEN 2013\n WHEN 'timestamp' THEN 93\n WHEN 'timestamp without time zone' THEN 93\n WHEN 'timestamptz' THEN 2014\n WHEN 'timestamp with time zone' THEN 2014\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n WHEN 'integer' THEN 4\n WHEN 'int' THEN 4\n WHEN 'int4' THEN 4\n WHEN 'bigint' THEN - 5\n WHEN 'int8' THEN - 5\n WHEN 'decimal' THEN 3\n WHEN 'real' THEN 7\n WHEN 'float4' THEN 7\n WHEN 'double precision' THEN 8\n WHEN 'float8' THEN 8\n WHEN 'float' THEN 6\n WHEN 'numeric' THEN 2\n WHEN 'timestamptz' THEN 2014\n WHEN 'bytea' THEN - 2\n WHEN 'oid' THEN - 5\n WHEN 'name' THEN 12\n WHEN 'ARRAY' THEN 2003\n WHEN 'geometry' THEN - 4\n WHEN 'super' THEN - 16\n WHEN 'varbyte' THEN - 4\n WHEN 'geography' THEN - 4\n ELSE 1111\n END\n AS SMALLINT\n ) AS DATA_TYPE,\n COALESCE(\n NULL,\n CASE columntype\n WHEN 'boolean' THEN 'bool'\n WHEN 'character varying' THEN 'varchar'\n WHEN '\"char\"' THEN 'char'\n WHEN 'smallint' THEN 'int2'\n WHEN 'integer' THEN 'int4'\n WHEN 'bigint' THEN 'int8'\n WHEN 'real' THEN 'float4'\n WHEN 'double precision' THEN 'float8'\n WHEN 'time without time zone' THEN 'time'\n WHEN 'time with time zone' THEN 'timetz'\n WHEN 'timestamp without time zone' THEN 'timestamp'\n WHEN 'timestamp with time zone' THEN 'timestamptz'\n ELSE columntype\n END\n ) AS TYPE_NAME,\n CASE columntype_rep\n WHEN 'int4' THEN 10\n WHEN 'bit' THEN 1\n WHEN 'bool' THEN 1\n WHEN 'boolean' THEN 1\n WHEN 'varchar' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN 'character varying' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN 'char' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 4), ''), '0') AS INT)\n WHEN 'character' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 4), ''), '0') AS INT)\n WHEN 'nchar' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN 'bpchar' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN 'nvarchar' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN 'date' THEN 13\n WHEN 'time' THEN 15\n WHEN 'time without time zone' THEN 15\n WHEN 'timetz' THEN 21\n WHEN 'timestamp' THEN 29\n WHEN 'timestamp without time zone' THEN 29\n WHEN 'time with time zone' THEN 21\n WHEN 'timestamptz' THEN 35\n WHEN 'timestamp with time zone' THEN 35\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n 
WHEN 'integer' THEN 10\n WHEN 'int' THEN 10\n WHEN 'int4' THEN 10\n WHEN 'bigint' THEN 19\n WHEN 'int8' THEN 19\n WHEN 'decimal' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN 'real' THEN 8\n WHEN 'float4' THEN 8\n WHEN 'double precision' THEN 17\n WHEN 'float8' THEN 17\n WHEN 'float' THEN 17\n WHEN 'numeric' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN '_float4' THEN 8\n WHEN 'oid' THEN 10\n WHEN '_int4' THEN 10\n WHEN '_int2' THEN 5\n WHEN 'geometry' THEN NULL\n WHEN 'super' THEN NULL\n WHEN 'varbyte' THEN NULL\n WHEN 'geography' THEN NULL\n ELSE 2147483647\n END AS COLUMN_SIZE,\n NULL AS BUFFER_LENGTH,\n CASE REGEXP_REPLACE(columntype, '[()0-9,]')\n WHEN 'real' THEN 8\n WHEN 'float4' THEN 8\n WHEN 'double precision' THEN 17\n WHEN 'float8' THEN 17\n WHEN 'timestamp' THEN 6\n WHEN 'timestamp without time zone' THEN 6\n WHEN 'geometry' THEN NULL\n WHEN 'super' THEN NULL\n WHEN 'numeric' THEN CAST(regexp_substr(columntype, '[0-9]+', charindex(',', columntype)) AS INT)\n WHEN 'varbyte' THEN NULL\n WHEN 'geography' THEN NULL\n ELSE 0\n END AS DECIMAL_DIGITS,\n CASE columntype\n WHEN 'varbyte' THEN 2\n WHEN 'geography' THEN 2\n ELSE 10\n END AS NUM_PREC_RADIX,\n NULL AS NULLABLE,\n NULL AS REMARKS,\n NULL AS COLUMN_DEF,\n CAST(\n CASE columntype_rep\n WHEN 'text' THEN 12\n WHEN 'bit' THEN - 7\n WHEN 'bool' THEN - 7\n WHEN 'boolean' THEN - 7\n WHEN 'varchar' THEN 12\n WHEN 'character varying' THEN 12\n WHEN 'char' THEN 1\n WHEN 'character' THEN 1\n WHEN 'nchar' THEN 12\n WHEN 'bpchar' THEN 1\n WHEN 'nvarchar' THEN 12\n WHEN '\"char\"' THEN 1\n WHEN 'date' THEN 91\n WHEN 'time' THEN 92\n WHEN 'time without time zone' THEN 92\n WHEN 'timetz' THEN 2013\n WHEN 'time with time zone' THEN 2013\n WHEN 'timestamp' THEN 93\n WHEN 'timestamp without time zone' THEN 93\n WHEN 'timestamptz' THEN 2014\n WHEN 'timestamp with time zone' THEN 2014\n WHEN 'smallint' THEN 5\n WHEN 'int2' THEN 5\n WHEN 'integer' THEN 4\n WHEN 'int' THEN 4\n WHEN 'int4' THEN 4\n WHEN 'bigint' THEN - 5\n WHEN 'int8' THEN - 5\n WHEN 'decimal' THEN 3\n WHEN 'real' THEN 7\n WHEN 'float4' THEN 7\n WHEN 'double precision' THEN 8\n WHEN 'float8' THEN 8\n WHEN 'float' THEN 6\n WHEN 'numeric' THEN 2\n WHEN 'bytea' THEN - 2\n WHEN 'oid' THEN - 5\n WHEN 'name' THEN 12\n WHEN 'ARRAY' THEN 2003\n WHEN 'geometry' THEN - 4\n WHEN 'super' THEN - 16\n WHEN 'varbyte' THEN - 4\n WHEN 'geography' THEN - 4\n ELSE 1111\n END\n AS SMALLINT\n ) AS SQL_DATA_TYPE,\n CAST(NULL AS SMALLINT) AS SQL_DATETIME_SUB,\n CASE\n WHEN LEFT(columntype, 7) = 'varchar' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 7), ''), '0') AS INT)\n WHEN LEFT(columntype, 4) = 'char' THEN CAST(isnull(nullif(regexp_substr(columntype, '[0-9]+', 4), ''), '0') AS INT)\n WHEN columntype = 'string' THEN 16383\n ELSE NULL\n END AS CHAR_OCTET_LENGTH,\n columnnum AS ORDINAL_POSITION,\n NULL AS IS_NULLABLE,\n NULL AS SCOPE_CATALOG,\n NULL AS SCOPE_SCHEMA,\n NULL AS SCOPE_TABLE,\n NULL AS SOURCE_DATA_TYPE,\n 'NO' AS IS_AUTOINCREMENT,\n 'NO' AS IS_GENERATEDCOLUMN\n FROM (\n SELECT\n schemaname,\n tablename,\n columnname,\n columntype AS columntype_rep,\n columntype,\n columnnum\n FROM get_late_binding_view_cols_unpacked\n ) AS lbv_columns\n WHERE\n true AND\n current_database() = 'cubedb' AND\n schemaname LIKE 'public' AND\n tablename LIKE 'KibanaSampleDataEcommerce'\n ;\"#.to_string(),\nDatabaseProtocol::PostgreSQL).await?"
---
-+-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+---------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
-| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | COLUMN_NAME | DATA_TYPE | TYPE_NAME | COLUMN_SIZE | BUFFER_LENGTH | DECIMAL_DIGITS | NUM_PREC_RADIX | NULLABLE | REMARKS | COLUMN_DEF | SQL_DATA_TYPE | SQL_DATETIME_SUB | CHAR_OCTET_LENGTH | ORDINAL_POSITION | IS_NULLABLE | SCOPE_CATALOG | SCOPE_SCHEMA | SCOPE_TABLE | SOURCE_DATA_TYPE | IS_AUTOINCREMENT | IS_GENERATEDCOLUMN |
-+-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+---------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
-| cubedb | public | KibanaSampleDataEcommerce | count | -5 | int8 | 19 | NULL | 0 | 10 | NULL | NULL | NULL | -5 | NULL | 19 | 1 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | maxPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 2 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | sumPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 3 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | minPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 4 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | avgPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 5 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | countDistinct | -5 | int8 | 19 | NULL | 0 | 10 | NULL | NULL | NULL | -5 | NULL | 19 | 6 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | order_date | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 7 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | last_mod | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 8 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | customer_gender | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 9 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | notes | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 10 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | taxful_total_price | 2 | numeric | -1 | NULL | 65531 | 10 | 1 | NULL | NULL | 2 | NULL | 65535 | 11 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | has_subscription | -7 | bool | 1 | NULL | 0 | 10 | 1 | NULL | NULL | -7 | NULL | 1 | 12 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | is_male | -7 | bool | 1 | NULL | 0 | 10 | NULL | NULL | NULL | -7 | NULL | 1 | 13 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | is_female | -7 | bool | 1 | NULL | 0 | 10 | NULL | NULL | NULL | -7 | NULL | 1 | 14 | NULL | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | __user | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 15 | YES | NULL | NULL | NULL | 0 | NO | false |
-| cubedb | public | KibanaSampleDataEcommerce | __cubeJoinField | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 16 | YES | NULL | NULL | NULL | 0 | NO | false |
-+-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+---------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
++-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+-----------------------------------------------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
+| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | COLUMN_NAME | DATA_TYPE | TYPE_NAME | COLUMN_SIZE | BUFFER_LENGTH | DECIMAL_DIGITS | NUM_PREC_RADIX | NULLABLE | REMARKS | COLUMN_DEF | SQL_DATA_TYPE | SQL_DATETIME_SUB | CHAR_OCTET_LENGTH | ORDINAL_POSITION | IS_NULLABLE | SCOPE_CATALOG | SCOPE_SCHEMA | SCOPE_TABLE | SOURCE_DATA_TYPE | IS_AUTOINCREMENT | IS_GENERATEDCOLUMN |
++-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+-----------------------------------------------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
+| cubedb | public | KibanaSampleDataEcommerce | count | -5 | int8 | 19 | NULL | 0 | 10 | NULL | Events count | NULL | -5 | NULL | 19 | 1 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | maxPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 2 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | sumPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 3 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | minPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 4 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | avgPrice | 2 | numeric | -1 | NULL | 65531 | 10 | NULL | NULL | NULL | 2 | NULL | 65535 | 5 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | countDistinct | -5 | int8 | 19 | NULL | 0 | 10 | NULL | NULL | NULL | -5 | NULL | 19 | 6 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | order_date | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 7 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | last_mod | 93 | timestamp | 29 | NULL | 6 | 10 | 1 | NULL | NULL | 93 | NULL | 29 | 8 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | customer_gender | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | Customer gender | NULL | 12 | NULL | 2147483647 | 9 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | notes | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | NULL | NULL | 12 | NULL | 2147483647 | 10 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | taxful_total_price | 2 | numeric | -1 | NULL | 65531 | 10 | 1 | NULL | NULL | 2 | NULL | 65535 | 11 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | has_subscription | -7 | bool | 1 | NULL | 0 | 10 | 1 | NULL | NULL | -7 | NULL | 1 | 12 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | is_male | -7 | bool | 1 | NULL | 0 | 10 | NULL | Male users segment | NULL | -7 | NULL | 1 | 13 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | is_female | -7 | bool | 1 | NULL | 0 | 10 | NULL | NULL | NULL | -7 | NULL | 1 | 14 | NULL | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | __user | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | Virtual column for security context switching | NULL | 12 | NULL | 2147483647 | 15 | YES | NULL | NULL | NULL | 0 | NO | false |
+| cubedb | public | KibanaSampleDataEcommerce | __cubeJoinField | 12 | text | 2147483647 | NULL | 0 | 10 | 1 | Virtual column for joining cubes | NULL | 12 | NULL | 2147483647 | 16 | YES | NULL | NULL | NULL | 0 | NO | false |
++-----------+-------------+---------------------------+--------------------+-----------+-----------+-------------+---------------+----------------+----------------+----------+-----------------------------------------------+------------+---------------+------------------+-------------------+------------------+-------------+---------------+--------------+-------------+------------------+------------------+--------------------+
diff --git a/rust/cubesql/cubesql/src/transport/ctx.rs b/rust/cubesql/cubesql/src/transport/ctx.rs
index ae8141c571fc5..a3ceee2a4c7f4 100644
--- a/rust/cubesql/cubesql/src/transport/ctx.rs
+++ b/rust/cubesql/cubesql/src/transport/ctx.rs
@@ -22,6 +22,7 @@ pub struct CubeMetaTable {
pub record_oid: u32,
pub array_handler_oid: u32,
pub name: String,
+ pub description: Option<String>,
pub columns: Vec<CubeMetaColumn>,
}
@@ -29,6 +30,7 @@ pub struct CubeMetaTable {
pub struct CubeMetaColumn {
pub oid: u32,
pub name: String,
+ pub description: Option<String>,
pub column_type: ColumnType,
pub can_be_null: bool,
}
@@ -49,12 +51,14 @@ impl MetaContext {
record_oid: oid_iter.next().unwrap_or(0),
array_handler_oid: oid_iter.next().unwrap_or(0),
name: cube.name.clone(),
+ description: cube.description.clone(),
columns: cube
.get_columns()
.iter()
.map(|column| CubeMetaColumn {
oid: oid_iter.next().unwrap_or(0),
name: column.get_name().clone(),
+ description: column.get_description().clone(),
column_type: column.get_column_type().clone(),
can_be_null: column.sql_can_be_null(),
})
diff --git a/rust/cubesql/cubesql/src/transport/ext.rs b/rust/cubesql/cubesql/src/transport/ext.rs
index 654069f8f6092..98e2d823f64d4 100644
--- a/rust/cubesql/cubesql/src/transport/ext.rs
+++ b/rust/cubesql/cubesql/src/transport/ext.rs
@@ -199,7 +199,7 @@ impl V1CubeMetaExt for CubeMeta {
columns.push(CubeColumn {
member_name: measure.name.clone(),
name: measure.get_real_name(),
- description: None,
+ description: measure.description.clone(),
column_type: measure.get_sql_type(),
can_be_null: false,
});
@@ -209,7 +209,7 @@ impl V1CubeMetaExt for CubeMeta {
columns.push(CubeColumn {
member_name: dimension.name.clone(),
name: dimension.get_real_name(),
- description: None,
+ description: dimension.description.clone(),
column_type: dimension.get_sql_type(),
can_be_null: dimension.sql_can_be_null(),
});
@@ -219,7 +219,7 @@ impl V1CubeMetaExt for CubeMeta {
columns.push(CubeColumn {
member_name: segment.name.clone(),
name: segment.get_real_name(),
- description: None,
+ description: segment.description.clone(),
column_type: ColumnType::Boolean,
can_be_null: false,
});
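For context, the REMARKS values in the JDBC snapshot above ("Events count", "Customer gender", "Male users segment") are sourced from `description` fields in the data model, which this commit threads through `CubeMetaTable` and `CubeMetaColumn`. A minimal YAML sketch of a model that would produce those descriptions — the table name and `sql` expressions are assumptions, not taken from the patch:

```yaml
cubes:
  - name: KibanaSampleDataEcommerce
    sql_table: kibana_sample_data_ecommerce # assumed source table

    measures:
      - name: count
        type: count
        description: Events count # surfaces as JDBC REMARKS / pg_description

    dimensions:
      - name: customer_gender
        sql: customer_gender
        type: string
        description: Customer gender

    segments:
      - name: is_male
        sql: "{CUBE}.customer_gender = 'MALE'" # assumed predicate
        description: Male users segment
```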
From 95beaa43e049c808c859cdab92e10e992ae49934 Mon Sep 17 00:00:00 2001
From: Dmitry Patsura
Date: Mon, 9 Sep 2024 10:43:19 +0200
Subject: [PATCH 009/415] v0.35.80
---
CHANGELOG.md | 17 ++++++++
lerna.json | 2 +-
packages/cubejs-api-gateway/CHANGELOG.md | 8 ++++
packages/cubejs-api-gateway/package.json | 4 +-
packages/cubejs-athena-driver/CHANGELOG.md | 8 ++++
packages/cubejs-athena-driver/package.json | 4 +-
packages/cubejs-backend-native/CHANGELOG.md | 11 +++++
packages/cubejs-backend-native/package.json | 4 +-
packages/cubejs-bigquery-driver/CHANGELOG.md | 8 ++++
packages/cubejs-bigquery-driver/package.json | 4 +-
packages/cubejs-cli/CHANGELOG.md | 8 ++++
packages/cubejs-cli/package.json | 6 +--
.../cubejs-clickhouse-driver/CHANGELOG.md | 8 ++++
.../cubejs-clickhouse-driver/package.json | 4 +-
packages/cubejs-crate-driver/CHANGELOG.md | 8 ++++
packages/cubejs-crate-driver/package.json | 6 +--
.../CHANGELOG.md | 8 ++++
.../package.json | 4 +-
.../cubejs-dbt-schema-extension/CHANGELOG.md | 8 ++++
.../cubejs-dbt-schema-extension/package.json | 6 +--
packages/cubejs-docker/CHANGELOG.md | 8 ++++
packages/cubejs-docker/package.json | 40 +++++++++----------
packages/cubejs-dremio-driver/CHANGELOG.md | 8 ++++
packages/cubejs-dremio-driver/package.json | 4 +-
packages/cubejs-druid-driver/CHANGELOG.md | 8 ++++
packages/cubejs-druid-driver/package.json | 4 +-
packages/cubejs-duckdb-driver/CHANGELOG.md | 8 ++++
packages/cubejs-duckdb-driver/package.json | 6 +--
packages/cubejs-firebolt-driver/CHANGELOG.md | 8 ++++
packages/cubejs-firebolt-driver/package.json | 6 +--
packages/cubejs-ksql-driver/CHANGELOG.md | 8 ++++
packages/cubejs-ksql-driver/package.json | 4 +-
.../cubejs-materialize-driver/CHANGELOG.md | 8 ++++
.../cubejs-materialize-driver/package.json | 6 +--
packages/cubejs-mysql-driver/CHANGELOG.md | 8 ++++
packages/cubejs-mysql-driver/package.json | 4 +-
packages/cubejs-postgres-driver/CHANGELOG.md | 8 ++++
packages/cubejs-postgres-driver/package.json | 4 +-
packages/cubejs-questdb-driver/CHANGELOG.md | 8 ++++
packages/cubejs-questdb-driver/package.json | 6 +--
packages/cubejs-redshift-driver/CHANGELOG.md | 8 ++++
packages/cubejs-redshift-driver/package.json | 4 +-
packages/cubejs-schema-compiler/CHANGELOG.md | 11 +++++
packages/cubejs-schema-compiler/package.json | 4 +-
packages/cubejs-server-core/CHANGELOG.md | 11 +++++
packages/cubejs-server-core/package.json | 8 ++--
packages/cubejs-server/CHANGELOG.md | 8 ++++
packages/cubejs-server/package.json | 6 +--
packages/cubejs-testing-drivers/CHANGELOG.md | 8 ++++
packages/cubejs-testing-drivers/package.json | 18 ++++-----
packages/cubejs-testing-shared/CHANGELOG.md | 8 ++++
packages/cubejs-testing-shared/package.json | 4 +-
packages/cubejs-testing/CHANGELOG.md | 8 ++++
packages/cubejs-testing/package.json | 8 ++--
packages/cubejs-trino-driver/CHANGELOG.md | 8 ++++
packages/cubejs-trino-driver/package.json | 4 +-
rust/cubesql/CHANGELOG.md | 12 ++++++
rust/cubesql/package.json | 2 +-
58 files changed, 347 insertions(+), 93 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8901385b30626..aa451ef07acd2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,23 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+
+### Bug Fixes
+
+* **schema-compiler:** propagate FILTER_PARAMS from view to inner cube's SELECT ([#8466](https://github.com/cube-js/cube/issues/8466)) ([c0466fd](https://github.com/cube-js/cube/commit/c0466fde9b7a3834159d7ec592362edcab6d9795))
+
+
+### Features
+
+* **cubesql:** Fill pg_description table with cube and members descriptions ([#8618](https://github.com/cube-js/cube/issues/8618)) ([2288c18](https://github.com/cube-js/cube/commit/2288c18bf30d1f3a3299b235fe9b4405d2cb7463))
+* **cubesql:** Support join with type coercion ([#8608](https://github.com/cube-js/cube/issues/8608)) ([46b3a36](https://github.com/cube-js/cube/commit/46b3a36936f0f00805144714f0dd87a3c50a5e0a))
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
diff --git a/lerna.json b/lerna.json
index d0b76af684c9d..369b30da80992 100644
--- a/lerna.json
+++ b/lerna.json
@@ -1,5 +1,5 @@
{
- "version": "0.35.79",
+ "version": "0.35.80",
"npmClient": "yarn",
"useWorkspaces": true,
"packages": [
diff --git a/packages/cubejs-api-gateway/CHANGELOG.md b/packages/cubejs-api-gateway/CHANGELOG.md
index 69ac919655d59..7feee16c649c7 100644
--- a/packages/cubejs-api-gateway/CHANGELOG.md
+++ b/packages/cubejs-api-gateway/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/api-gateway
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
diff --git a/packages/cubejs-api-gateway/package.json b/packages/cubejs-api-gateway/package.json
index 503d20fea508b..b06c058cc6ffc 100644
--- a/packages/cubejs-api-gateway/package.json
+++ b/packages/cubejs-api-gateway/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/api-gateway",
"description": "Cube.js API Gateway",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -27,7 +27,7 @@
"dist/src/*"
],
"dependencies": {
- "@cubejs-backend/native": "^0.35.79",
+ "@cubejs-backend/native": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"@ungap/structured-clone": "^0.3.4",
"body-parser": "^1.19.0",
diff --git a/packages/cubejs-athena-driver/CHANGELOG.md b/packages/cubejs-athena-driver/CHANGELOG.md
index 26ab5e6b3217c..ed85083cd3c1c 100644
--- a/packages/cubejs-athena-driver/CHANGELOG.md
+++ b/packages/cubejs-athena-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/athena-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/athena-driver
diff --git a/packages/cubejs-athena-driver/package.json b/packages/cubejs-athena-driver/package.json
index 7d01cf0cad96a..58df79e1265fc 100644
--- a/packages/cubejs-athena-driver/package.json
+++ b/packages/cubejs-athena-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/athena-driver",
"description": "Cube.js Athena database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -37,7 +37,7 @@
},
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.80",
"@types/ramda": "^0.27.40",
"typescript": "~5.2.2"
},
diff --git a/packages/cubejs-backend-native/CHANGELOG.md b/packages/cubejs-backend-native/CHANGELOG.md
index 7c9ef9fb7d169..274b72931988d 100644
--- a/packages/cubejs-backend-native/CHANGELOG.md
+++ b/packages/cubejs-backend-native/CHANGELOG.md
@@ -3,6 +3,17 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+
+### Features
+
+* **cubesql:** Support join with type coercion ([#8608](https://github.com/cube-js/cube/issues/8608)) ([46b3a36](https://github.com/cube-js/cube/commit/46b3a36936f0f00805144714f0dd87a3c50a5e0a))
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
diff --git a/packages/cubejs-backend-native/package.json b/packages/cubejs-backend-native/package.json
index 13462994322a3..517d23cb0b5a9 100644
--- a/packages/cubejs-backend-native/package.json
+++ b/packages/cubejs-backend-native/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/native",
- "version": "0.35.79",
+ "version": "0.35.80",
"author": "Cube Dev, Inc.",
"description": "Native module for Cube.js (binding to Rust codebase)",
"main": "dist/js/index.js",
@@ -43,7 +43,7 @@
"uuid": "^8.3.2"
},
"dependencies": {
- "@cubejs-backend/cubesql": "^0.35.79",
+ "@cubejs-backend/cubesql": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"@cubejs-infra/post-installer": "^0.0.7"
},
diff --git a/packages/cubejs-bigquery-driver/CHANGELOG.md b/packages/cubejs-bigquery-driver/CHANGELOG.md
index de4493c882a65..c6afd088ecb1e 100644
--- a/packages/cubejs-bigquery-driver/CHANGELOG.md
+++ b/packages/cubejs-bigquery-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/bigquery-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/bigquery-driver
diff --git a/packages/cubejs-bigquery-driver/package.json b/packages/cubejs-bigquery-driver/package.json
index 3a970a7b48da9..8a66c1be371aa 100644
--- a/packages/cubejs-bigquery-driver/package.json
+++ b/packages/cubejs-bigquery-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/bigquery-driver",
"description": "Cube.js BigQuery database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -36,7 +36,7 @@
"ramda": "^0.27.2"
},
"devDependencies": {
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.80",
"@types/big.js": "^6.2.2",
"@types/dedent": "^0.7.0",
"@types/jest": "^27",
diff --git a/packages/cubejs-cli/CHANGELOG.md b/packages/cubejs-cli/CHANGELOG.md
index 74041a952226f..2ac4f7cfc3eb8 100644
--- a/packages/cubejs-cli/CHANGELOG.md
+++ b/packages/cubejs-cli/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package cubejs-cli
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package cubejs-cli
diff --git a/packages/cubejs-cli/package.json b/packages/cubejs-cli/package.json
index 19bcab23fb737..3bd4b21b1e76e 100644
--- a/packages/cubejs-cli/package.json
+++ b/packages/cubejs-cli/package.json
@@ -2,7 +2,7 @@
"name": "cubejs-cli",
"description": "Cube.js Command Line Interface",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -31,7 +31,7 @@
],
"dependencies": {
"@cubejs-backend/dotenv": "^9.0.2",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"chalk": "^2.4.2",
"cli-progress": "^3.10",
@@ -50,7 +50,7 @@
},
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/server": "^0.35.79",
+ "@cubejs-backend/server": "^0.35.80",
"@oclif/command": "^1.8.0",
"@types/cli-progress": "^3.8.0",
"@types/cross-spawn": "^6.0.2",
diff --git a/packages/cubejs-clickhouse-driver/CHANGELOG.md b/packages/cubejs-clickhouse-driver/CHANGELOG.md
index 4ec7a5b286f11..3c08631c82f2b 100644
--- a/packages/cubejs-clickhouse-driver/CHANGELOG.md
+++ b/packages/cubejs-clickhouse-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/clickhouse-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/clickhouse-driver
diff --git a/packages/cubejs-clickhouse-driver/package.json b/packages/cubejs-clickhouse-driver/package.json
index 2df31ed99e9a7..3f87cac36e811 100644
--- a/packages/cubejs-clickhouse-driver/package.json
+++ b/packages/cubejs-clickhouse-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/clickhouse-driver",
"description": "Cube.js ClickHouse database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -38,7 +38,7 @@
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.80",
"@types/jest": "^27",
"jest": "27",
"typescript": "~5.2.2"
diff --git a/packages/cubejs-crate-driver/CHANGELOG.md b/packages/cubejs-crate-driver/CHANGELOG.md
index 6eff588fda812..2c5575b987aa6 100644
--- a/packages/cubejs-crate-driver/CHANGELOG.md
+++ b/packages/cubejs-crate-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/crate-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/crate-driver
diff --git a/packages/cubejs-crate-driver/package.json b/packages/cubejs-crate-driver/package.json
index a37e10f89d2ed..31784164d9d1f 100644
--- a/packages/cubejs-crate-driver/package.json
+++ b/packages/cubejs-crate-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/crate-driver",
"description": "Cube.js Crate database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -28,14 +28,14 @@
"lint:fix": "eslint --fix src/* --ext .ts"
},
"dependencies": {
- "@cubejs-backend/postgres-driver": "^0.35.79",
+ "@cubejs-backend/postgres-driver": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"pg": "^8.7.1"
},
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.80",
"testcontainers": "^10.10.4",
"typescript": "~5.2.2"
},
diff --git a/packages/cubejs-databricks-jdbc-driver/CHANGELOG.md b/packages/cubejs-databricks-jdbc-driver/CHANGELOG.md
index 1591c48b9acbc..bc3243302318f 100644
--- a/packages/cubejs-databricks-jdbc-driver/CHANGELOG.md
+++ b/packages/cubejs-databricks-jdbc-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/databricks-jdbc-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
diff --git a/packages/cubejs-databricks-jdbc-driver/package.json b/packages/cubejs-databricks-jdbc-driver/package.json
index dd56a6b414289..d8c128b9ff36e 100644
--- a/packages/cubejs-databricks-jdbc-driver/package.json
+++ b/packages/cubejs-databricks-jdbc-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/databricks-jdbc-driver",
"description": "Cube.js Databricks database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"license": "Apache-2.0",
"repository": {
"type": "git",
@@ -33,7 +33,7 @@
"@azure/storage-blob": "^12.9.0",
"@cubejs-backend/base-driver": "^0.35.67",
"@cubejs-backend/jdbc-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"node-fetch": "^2.6.1",
"ramda": "^0.27.2",
diff --git a/packages/cubejs-dbt-schema-extension/CHANGELOG.md b/packages/cubejs-dbt-schema-extension/CHANGELOG.md
index b423caab746c8..0c34376e825f8 100644
--- a/packages/cubejs-dbt-schema-extension/CHANGELOG.md
+++ b/packages/cubejs-dbt-schema-extension/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/dbt-schema-extension
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/dbt-schema-extension
diff --git a/packages/cubejs-dbt-schema-extension/package.json b/packages/cubejs-dbt-schema-extension/package.json
index 04ea86c55770c..55a3df4a78453 100644
--- a/packages/cubejs-dbt-schema-extension/package.json
+++ b/packages/cubejs-dbt-schema-extension/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/dbt-schema-extension",
"description": "Cube.js dbt Schema Extension",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -25,14 +25,14 @@
"lint:fix": "eslint --fix src/* --ext .ts,.js"
},
"dependencies": {
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"fs-extra": "^9.1.0",
"inflection": "^1.12.0",
"node-fetch": "^2.6.1"
},
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing": "^0.35.79",
+ "@cubejs-backend/testing": "^0.35.80",
"@types/generic-pool": "^3.1.9",
"@types/jest": "^27",
"jest": "^27",
diff --git a/packages/cubejs-docker/CHANGELOG.md b/packages/cubejs-docker/CHANGELOG.md
index b33aba18c2b36..fb78cb93db336 100644
--- a/packages/cubejs-docker/CHANGELOG.md
+++ b/packages/cubejs-docker/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/docker
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/docker
diff --git a/packages/cubejs-docker/package.json b/packages/cubejs-docker/package.json
index dc575b609ea12..e13bb865f3c7d 100644
--- a/packages/cubejs-docker/package.json
+++ b/packages/cubejs-docker/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/docker",
- "version": "0.35.79",
+ "version": "0.35.80",
"description": "Cube.js In Docker (virtual package)",
"author": "Cube Dev, Inc.",
"license": "Apache-2.0",
@@ -9,33 +9,33 @@
"node": "^14.0.0 || ^16.0.0 || >=17.0.0"
},
"dependencies": {
- "@cubejs-backend/athena-driver": "^0.35.79",
- "@cubejs-backend/bigquery-driver": "^0.35.79",
- "@cubejs-backend/clickhouse-driver": "^0.35.79",
- "@cubejs-backend/crate-driver": "^0.35.79",
- "@cubejs-backend/databricks-jdbc-driver": "^0.35.79",
- "@cubejs-backend/dbt-schema-extension": "^0.35.79",
- "@cubejs-backend/dremio-driver": "^0.35.79",
- "@cubejs-backend/druid-driver": "^0.35.79",
- "@cubejs-backend/duckdb-driver": "^0.35.79",
+ "@cubejs-backend/athena-driver": "^0.35.80",
+ "@cubejs-backend/bigquery-driver": "^0.35.80",
+ "@cubejs-backend/clickhouse-driver": "^0.35.80",
+ "@cubejs-backend/crate-driver": "^0.35.80",
+ "@cubejs-backend/databricks-jdbc-driver": "^0.35.80",
+ "@cubejs-backend/dbt-schema-extension": "^0.35.80",
+ "@cubejs-backend/dremio-driver": "^0.35.80",
+ "@cubejs-backend/druid-driver": "^0.35.80",
+ "@cubejs-backend/duckdb-driver": "^0.35.80",
"@cubejs-backend/elasticsearch-driver": "^0.35.67",
- "@cubejs-backend/firebolt-driver": "^0.35.79",
+ "@cubejs-backend/firebolt-driver": "^0.35.80",
"@cubejs-backend/hive-driver": "^0.35.67",
- "@cubejs-backend/ksql-driver": "^0.35.79",
- "@cubejs-backend/materialize-driver": "^0.35.79",
+ "@cubejs-backend/ksql-driver": "^0.35.80",
+ "@cubejs-backend/materialize-driver": "^0.35.80",
"@cubejs-backend/mongobi-driver": "^0.35.67",
"@cubejs-backend/mssql-driver": "^0.35.67",
- "@cubejs-backend/mysql-driver": "^0.35.79",
+ "@cubejs-backend/mysql-driver": "^0.35.80",
"@cubejs-backend/oracle-driver": "^0.35.67",
- "@cubejs-backend/postgres-driver": "^0.35.79",
+ "@cubejs-backend/postgres-driver": "^0.35.80",
"@cubejs-backend/prestodb-driver": "^0.35.67",
- "@cubejs-backend/questdb-driver": "^0.35.79",
- "@cubejs-backend/redshift-driver": "^0.35.79",
- "@cubejs-backend/server": "^0.35.79",
+ "@cubejs-backend/questdb-driver": "^0.35.80",
+ "@cubejs-backend/redshift-driver": "^0.35.80",
+ "@cubejs-backend/server": "^0.35.80",
"@cubejs-backend/snowflake-driver": "^0.35.67",
"@cubejs-backend/sqlite-driver": "^0.35.67",
- "@cubejs-backend/trino-driver": "^0.35.79",
- "cubejs-cli": "^0.35.79",
+ "@cubejs-backend/trino-driver": "^0.35.80",
+ "cubejs-cli": "^0.35.80",
"typescript": "~5.2.2"
},
"resolutions": {
diff --git a/packages/cubejs-dremio-driver/CHANGELOG.md b/packages/cubejs-dremio-driver/CHANGELOG.md
index 9b13d66090b7a..b346f641447cb 100644
--- a/packages/cubejs-dremio-driver/CHANGELOG.md
+++ b/packages/cubejs-dremio-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/dremio-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/dremio-driver
diff --git a/packages/cubejs-dremio-driver/package.json b/packages/cubejs-dremio-driver/package.json
index 81507ebec476f..1f37cb653be7d 100644
--- a/packages/cubejs-dremio-driver/package.json
+++ b/packages/cubejs-dremio-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/dremio-driver",
"description": "Cube.js Dremio driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -18,7 +18,7 @@
},
"dependencies": {
"@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"axios": "^0.21.1",
"moment-timezone": "^0.5.31",
diff --git a/packages/cubejs-druid-driver/CHANGELOG.md b/packages/cubejs-druid-driver/CHANGELOG.md
index bfd237b5f65af..ddcd4c08f5519 100644
--- a/packages/cubejs-druid-driver/CHANGELOG.md
+++ b/packages/cubejs-druid-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/druid-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/druid-driver
diff --git a/packages/cubejs-druid-driver/package.json b/packages/cubejs-druid-driver/package.json
index a99c69e61e858..50a62a1492992 100644
--- a/packages/cubejs-druid-driver/package.json
+++ b/packages/cubejs-druid-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/druid-driver",
"description": "Cube.js Druid database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"license": "Apache-2.0",
"repository": {
"type": "git",
@@ -29,7 +29,7 @@
],
"dependencies": {
"@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"axios": "^0.21.1",
"moment-timezone": "^0.5.31"
diff --git a/packages/cubejs-duckdb-driver/CHANGELOG.md b/packages/cubejs-duckdb-driver/CHANGELOG.md
index 25f4fad80a61d..1956d2a85cb8d 100644
--- a/packages/cubejs-duckdb-driver/CHANGELOG.md
+++ b/packages/cubejs-duckdb-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/duckdb-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/duckdb-driver
diff --git a/packages/cubejs-duckdb-driver/package.json b/packages/cubejs-duckdb-driver/package.json
index bfc22c9cbe401..0d8491a5ac02b 100644
--- a/packages/cubejs-duckdb-driver/package.json
+++ b/packages/cubejs-duckdb-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/duckdb-driver",
"description": "Cube DuckDB database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -28,14 +28,14 @@
},
"dependencies": {
"@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"duckdb": "^1.0.0"
},
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.80",
"@types/jest": "^27",
"@types/node": "^16",
"jest": "^27",
diff --git a/packages/cubejs-firebolt-driver/CHANGELOG.md b/packages/cubejs-firebolt-driver/CHANGELOG.md
index e2218d3ddd7af..0ada0959c2ee3 100644
--- a/packages/cubejs-firebolt-driver/CHANGELOG.md
+++ b/packages/cubejs-firebolt-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/firebolt-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/firebolt-driver
diff --git a/packages/cubejs-firebolt-driver/package.json b/packages/cubejs-firebolt-driver/package.json
index 595f54fb95c77..dd09e7f692052 100644
--- a/packages/cubejs-firebolt-driver/package.json
+++ b/packages/cubejs-firebolt-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/firebolt-driver",
"description": "Cube.js Firebolt database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -29,14 +29,14 @@
},
"dependencies": {
"@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"firebolt-sdk": "^1.2.0"
},
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.80",
"typescript": "~5.2.2"
},
"publishConfig": {
diff --git a/packages/cubejs-ksql-driver/CHANGELOG.md b/packages/cubejs-ksql-driver/CHANGELOG.md
index 5674a1e75b476..2e878e5625468 100644
--- a/packages/cubejs-ksql-driver/CHANGELOG.md
+++ b/packages/cubejs-ksql-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/ksql-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/ksql-driver
diff --git a/packages/cubejs-ksql-driver/package.json b/packages/cubejs-ksql-driver/package.json
index 72a3b4e477fc4..064d8004e11f9 100644
--- a/packages/cubejs-ksql-driver/package.json
+++ b/packages/cubejs-ksql-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/ksql-driver",
"description": "Cube.js ksql database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -26,7 +26,7 @@
},
"dependencies": {
"@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"async-mutex": "0.3.2",
"axios": "^0.21.1",
diff --git a/packages/cubejs-materialize-driver/CHANGELOG.md b/packages/cubejs-materialize-driver/CHANGELOG.md
index 3fbdc3a260bc6..b9bf68d0dd5dd 100644
--- a/packages/cubejs-materialize-driver/CHANGELOG.md
+++ b/packages/cubejs-materialize-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/materialize-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/materialize-driver
diff --git a/packages/cubejs-materialize-driver/package.json b/packages/cubejs-materialize-driver/package.json
index c94a92bb8303b..abbe3f9c8aea0 100644
--- a/packages/cubejs-materialize-driver/package.json
+++ b/packages/cubejs-materialize-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/materialize-driver",
"description": "Cube.js Materialize database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -28,7 +28,7 @@
},
"dependencies": {
"@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/postgres-driver": "^0.35.79",
+ "@cubejs-backend/postgres-driver": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"@types/pg": "^8.6.0",
"pg": "^8.6.0",
@@ -38,7 +38,7 @@
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing": "^0.35.79",
+ "@cubejs-backend/testing": "^0.35.80",
"typescript": "~5.2.2"
},
"publishConfig": {
diff --git a/packages/cubejs-mysql-driver/CHANGELOG.md b/packages/cubejs-mysql-driver/CHANGELOG.md
index 8ad803649b64b..600455fa747a1 100644
--- a/packages/cubejs-mysql-driver/CHANGELOG.md
+++ b/packages/cubejs-mysql-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/mysql-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/mysql-driver
diff --git a/packages/cubejs-mysql-driver/package.json b/packages/cubejs-mysql-driver/package.json
index eb6b465ae64a4..6bc4366234726 100644
--- a/packages/cubejs-mysql-driver/package.json
+++ b/packages/cubejs-mysql-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/mysql-driver",
"description": "Cube.js Mysql database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -35,7 +35,7 @@
},
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.80",
"@types/generic-pool": "^3.1.9",
"@types/jest": "^27",
"jest": "^27",
diff --git a/packages/cubejs-postgres-driver/CHANGELOG.md b/packages/cubejs-postgres-driver/CHANGELOG.md
index edda8532676ad..9e5819aab51c9 100644
--- a/packages/cubejs-postgres-driver/CHANGELOG.md
+++ b/packages/cubejs-postgres-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/postgres-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/postgres-driver
diff --git a/packages/cubejs-postgres-driver/package.json b/packages/cubejs-postgres-driver/package.json
index e6bd93cf7b632..051cec7c6aa14 100644
--- a/packages/cubejs-postgres-driver/package.json
+++ b/packages/cubejs-postgres-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/postgres-driver",
"description": "Cube.js Postgres database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -38,7 +38,7 @@
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.80",
"testcontainers": "^10.10.4",
"typescript": "~5.2.2"
},
diff --git a/packages/cubejs-questdb-driver/CHANGELOG.md b/packages/cubejs-questdb-driver/CHANGELOG.md
index 1d1549136d208..443686ac992db 100644
--- a/packages/cubejs-questdb-driver/CHANGELOG.md
+++ b/packages/cubejs-questdb-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/questdb-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/questdb-driver
diff --git a/packages/cubejs-questdb-driver/package.json b/packages/cubejs-questdb-driver/package.json
index 93d4b5ce6f70d..71d442cbb98fb 100644
--- a/packages/cubejs-questdb-driver/package.json
+++ b/packages/cubejs-questdb-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/questdb-driver",
"description": "Cube.js QuestDB database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -28,7 +28,7 @@
},
"dependencies": {
"@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"@types/pg": "^8.6.0",
"moment": "^2.24.0",
@@ -38,7 +38,7 @@
"license": "Apache-2.0",
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.80",
"testcontainers": "^10.10.4",
"typescript": "~5.2.2"
},
diff --git a/packages/cubejs-redshift-driver/CHANGELOG.md b/packages/cubejs-redshift-driver/CHANGELOG.md
index 1550450f58729..99689d63de4f8 100644
--- a/packages/cubejs-redshift-driver/CHANGELOG.md
+++ b/packages/cubejs-redshift-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/redshift-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/redshift-driver
diff --git a/packages/cubejs-redshift-driver/package.json b/packages/cubejs-redshift-driver/package.json
index 485073b704d8e..78bbf34f8e8e5 100644
--- a/packages/cubejs-redshift-driver/package.json
+++ b/packages/cubejs-redshift-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/redshift-driver",
"description": "Cube.js Redshift database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -28,7 +28,7 @@
"@aws-sdk/client-s3": "^3.17.0",
"@aws-sdk/s3-request-presigner": "^3.17.0",
"@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/postgres-driver": "^0.35.79",
+ "@cubejs-backend/postgres-driver": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67"
},
"license": "Apache-2.0",
diff --git a/packages/cubejs-schema-compiler/CHANGELOG.md b/packages/cubejs-schema-compiler/CHANGELOG.md
index 8d37163770d07..a2daa39a1130e 100644
--- a/packages/cubejs-schema-compiler/CHANGELOG.md
+++ b/packages/cubejs-schema-compiler/CHANGELOG.md
@@ -3,6 +3,17 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+
+### Bug Fixes
+
+* **schema-compiler:** propagate FILTER_PARAMS from view to inner cube's SELECT ([#8466](https://github.com/cube-js/cube/issues/8466)) ([c0466fd](https://github.com/cube-js/cube/commit/c0466fde9b7a3834159d7ec592362edcab6d9795))
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
diff --git a/packages/cubejs-schema-compiler/package.json b/packages/cubejs-schema-compiler/package.json
index 02cc4b08ac373..9f168f6357473 100644
--- a/packages/cubejs-schema-compiler/package.json
+++ b/packages/cubejs-schema-compiler/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/schema-compiler",
"description": "Cube schema compiler",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -40,7 +40,7 @@
"@babel/standalone": "^7.24",
"@babel/traverse": "^7.24",
"@babel/types": "^7.24",
- "@cubejs-backend/native": "^0.35.79",
+ "@cubejs-backend/native": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"antlr4ts": "0.5.0-alpha.4",
"camelcase": "^6.2.0",
diff --git a/packages/cubejs-server-core/CHANGELOG.md b/packages/cubejs-server-core/CHANGELOG.md
index b57a34031151b..aa9f0f195ba82 100644
--- a/packages/cubejs-server-core/CHANGELOG.md
+++ b/packages/cubejs-server-core/CHANGELOG.md
@@ -3,6 +3,17 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+
+### Bug Fixes
+
+* **schema-compiler:** propagate FILTER_PARAMS from view to inner cube's SELECT ([#8466](https://github.com/cube-js/cube/issues/8466)) ([c0466fd](https://github.com/cube-js/cube/commit/c0466fde9b7a3834159d7ec592362edcab6d9795))
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/server-core
diff --git a/packages/cubejs-server-core/package.json b/packages/cubejs-server-core/package.json
index 15bdce1744f0c..b9bac813d1ef8 100644
--- a/packages/cubejs-server-core/package.json
+++ b/packages/cubejs-server-core/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/server-core",
"description": "Cube.js base component to wire all backend components together",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -29,12 +29,12 @@
"unit": "jest --runInBand --forceExit --coverage dist/test"
},
"dependencies": {
- "@cubejs-backend/api-gateway": "^0.35.79",
+ "@cubejs-backend/api-gateway": "^0.35.80",
"@cubejs-backend/cloud": "^0.35.67",
"@cubejs-backend/dotenv": "^9.0.2",
- "@cubejs-backend/native": "^0.35.79",
+ "@cubejs-backend/native": "^0.35.80",
"@cubejs-backend/query-orchestrator": "^0.35.78",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"@cubejs-backend/templates": "^0.35.67",
"codesandbox-import-utils": "^2.1.12",
diff --git a/packages/cubejs-server/CHANGELOG.md b/packages/cubejs-server/CHANGELOG.md
index 9a92b9528d87f..8ccb75089ceb3 100644
--- a/packages/cubejs-server/CHANGELOG.md
+++ b/packages/cubejs-server/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/server
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/server
diff --git a/packages/cubejs-server/package.json b/packages/cubejs-server/package.json
index 3e7e3a67793ad..ce57b1e96a67c 100644
--- a/packages/cubejs-server/package.json
+++ b/packages/cubejs-server/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/server",
"description": "Cube.js all-in-one server",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"types": "index.d.ts",
"repository": {
"type": "git",
@@ -42,8 +42,8 @@
"dependencies": {
"@cubejs-backend/cubestore-driver": "^0.35.78",
"@cubejs-backend/dotenv": "^9.0.2",
- "@cubejs-backend/native": "^0.35.79",
- "@cubejs-backend/server-core": "^0.35.79",
+ "@cubejs-backend/native": "^0.35.80",
+ "@cubejs-backend/server-core": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"@oclif/color": "^1.0.0",
"@oclif/command": "^1.8.13",
diff --git a/packages/cubejs-testing-drivers/CHANGELOG.md b/packages/cubejs-testing-drivers/CHANGELOG.md
index 1cdbaa9bc6738..dd86f1ae3c8e2 100644
--- a/packages/cubejs-testing-drivers/CHANGELOG.md
+++ b/packages/cubejs-testing-drivers/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/testing-drivers
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/testing-drivers
diff --git a/packages/cubejs-testing-drivers/package.json b/packages/cubejs-testing-drivers/package.json
index af7eba46bd814..4201ad4214a0e 100644
--- a/packages/cubejs-testing-drivers/package.json
+++ b/packages/cubejs-testing-drivers/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/testing-drivers",
- "version": "0.35.79",
+ "version": "0.35.80",
"description": "Cube.js drivers test suite",
"author": "Cube Dev, Inc.",
"license": "MIT",
@@ -46,22 +46,22 @@
"dist/src"
],
"dependencies": {
- "@cubejs-backend/athena-driver": "^0.35.79",
+ "@cubejs-backend/athena-driver": "^0.35.80",
"@cubejs-backend/base-driver": "^0.35.67",
- "@cubejs-backend/bigquery-driver": "^0.35.79",
- "@cubejs-backend/clickhouse-driver": "^0.35.79",
+ "@cubejs-backend/bigquery-driver": "^0.35.80",
+ "@cubejs-backend/clickhouse-driver": "^0.35.80",
"@cubejs-backend/cubestore-driver": "^0.35.78",
- "@cubejs-backend/databricks-jdbc-driver": "^0.35.79",
+ "@cubejs-backend/databricks-jdbc-driver": "^0.35.80",
"@cubejs-backend/dotenv": "^9.0.2",
"@cubejs-backend/linter": "^0.35.0",
"@cubejs-backend/mssql-driver": "^0.35.67",
- "@cubejs-backend/mysql-driver": "^0.35.79",
- "@cubejs-backend/postgres-driver": "^0.35.79",
+ "@cubejs-backend/mysql-driver": "^0.35.80",
+ "@cubejs-backend/postgres-driver": "^0.35.80",
"@cubejs-backend/query-orchestrator": "^0.35.78",
- "@cubejs-backend/server-core": "^0.35.79",
+ "@cubejs-backend/server-core": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"@cubejs-backend/snowflake-driver": "^0.35.67",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.80",
"@cubejs-client/core": "^0.35.23",
"@cubejs-client/ws-transport": "^0.35.23",
"@jest/globals": "^27",
diff --git a/packages/cubejs-testing-shared/CHANGELOG.md b/packages/cubejs-testing-shared/CHANGELOG.md
index bf5413bc5a079..2b50ab8071c60 100644
--- a/packages/cubejs-testing-shared/CHANGELOG.md
+++ b/packages/cubejs-testing-shared/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/testing-shared
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/testing-shared
diff --git a/packages/cubejs-testing-shared/package.json b/packages/cubejs-testing-shared/package.json
index 1e43f3e2286cf..dc0431853fe67 100644
--- a/packages/cubejs-testing-shared/package.json
+++ b/packages/cubejs-testing-shared/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/testing-shared",
- "version": "0.35.79",
+ "version": "0.35.80",
"description": "Cube.js Testing Helpers",
"author": "Cube Dev, Inc.",
"license": "Apache-2.0",
@@ -22,7 +22,7 @@
"dependencies": {
"@cubejs-backend/dotenv": "^9.0.2",
"@cubejs-backend/query-orchestrator": "^0.35.78",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"dedent": "^0.7.0",
"node-fetch": "^2.6.7",
diff --git a/packages/cubejs-testing/CHANGELOG.md b/packages/cubejs-testing/CHANGELOG.md
index 9bc1e584d8833..046a668afacc0 100644
--- a/packages/cubejs-testing/CHANGELOG.md
+++ b/packages/cubejs-testing/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/testing
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/testing
diff --git a/packages/cubejs-testing/package.json b/packages/cubejs-testing/package.json
index 1da510b58e11a..c607441e95b33 100644
--- a/packages/cubejs-testing/package.json
+++ b/packages/cubejs-testing/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/testing",
- "version": "0.35.79",
+ "version": "0.35.80",
"description": "Cube.js e2e tests",
"author": "Cube Dev, Inc.",
"license": "Apache-2.0",
@@ -91,11 +91,11 @@
"dependencies": {
"@cubejs-backend/cubestore-driver": "^0.35.78",
"@cubejs-backend/dotenv": "^9.0.2",
- "@cubejs-backend/postgres-driver": "^0.35.79",
+ "@cubejs-backend/postgres-driver": "^0.35.80",
"@cubejs-backend/query-orchestrator": "^0.35.78",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
- "@cubejs-backend/testing-shared": "^0.35.79",
+ "@cubejs-backend/testing-shared": "^0.35.80",
"@cubejs-client/ws-transport": "^0.35.23",
"dedent": "^0.7.0",
"fs-extra": "^8.1.0",
diff --git a/packages/cubejs-trino-driver/CHANGELOG.md b/packages/cubejs-trino-driver/CHANGELOG.md
index 137635b32a63a..522141eaccbcc 100644
--- a/packages/cubejs-trino-driver/CHANGELOG.md
+++ b/packages/cubejs-trino-driver/CHANGELOG.md
@@ -3,6 +3,14 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+**Note:** Version bump only for package @cubejs-backend/trino-driver
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/trino-driver
diff --git a/packages/cubejs-trino-driver/package.json b/packages/cubejs-trino-driver/package.json
index a78b95bcd71c8..b9e61df387a84 100644
--- a/packages/cubejs-trino-driver/package.json
+++ b/packages/cubejs-trino-driver/package.json
@@ -2,7 +2,7 @@
"name": "@cubejs-backend/trino-driver",
"description": "Cube.js Trino database driver",
"author": "Cube Dev, Inc.",
- "version": "0.35.79",
+ "version": "0.35.80",
"repository": {
"type": "git",
"url": "https://github.com/cube-js/cube.git",
@@ -27,7 +27,7 @@
"dependencies": {
"@cubejs-backend/base-driver": "^0.35.67",
"@cubejs-backend/prestodb-driver": "^0.35.67",
- "@cubejs-backend/schema-compiler": "^0.35.79",
+ "@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
"presto-client": "^0.12.2",
"ramda": "^0.27.0",
diff --git a/rust/cubesql/CHANGELOG.md b/rust/cubesql/CHANGELOG.md
index d316a9473e7bb..6d28a9b1da705 100644
--- a/rust/cubesql/CHANGELOG.md
+++ b/rust/cubesql/CHANGELOG.md
@@ -3,6 +3,18 @@
All notable changes to this project will be documented in this file.
See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+## [0.35.80](https://github.com/cube-js/cube/compare/v0.35.79...v0.35.80) (2024-09-09)
+
+
+### Features
+
+* **cubesql:** Fill pg_description table with cube and members descriptions ([#8618](https://github.com/cube-js/cube/issues/8618)) ([2288c18](https://github.com/cube-js/cube/commit/2288c18bf30d1f3a3299b235fe9b4405d2cb7463))
+* **cubesql:** Support join with type coercion ([#8608](https://github.com/cube-js/cube/issues/8608)) ([46b3a36](https://github.com/cube-js/cube/commit/46b3a36936f0f00805144714f0dd87a3c50a5e0a))
+
+
+
+
+
## [0.35.79](https://github.com/cube-js/cube/compare/v0.35.78...v0.35.79) (2024-09-04)
**Note:** Version bump only for package @cubejs-backend/cubesql
diff --git a/rust/cubesql/package.json b/rust/cubesql/package.json
index b0adb96c4ead5..39b594d78c683 100644
--- a/rust/cubesql/package.json
+++ b/rust/cubesql/package.json
@@ -1,6 +1,6 @@
{
"name": "@cubejs-backend/cubesql",
- "version": "0.35.79",
+ "version": "0.35.80",
"description": "SQL API for Cube as proxy over MySQL protocol.",
"engines": {
"node": "^12.0.0 || ^14.0.0 || >=16.0.0"
From 33f3726cc8b26a9a61a93e74c1e9e4b4756bd55f Mon Sep 17 00:00:00 2001
From: morgan-at-cube <153563892+morgan-at-cube@users.noreply.github.com>
Date: Mon, 9 Sep 2024 04:06:37 -0700
Subject: [PATCH 010/415] docs: Update environments.mdx (#8681)
Explain how dev environments are allocated one per user and how they are accessed.
I mentioned this twice. If you think it's overkill, we can remove one :)
---
docs/pages/product/workspace/environments.mdx | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/docs/pages/product/workspace/environments.mdx b/docs/pages/product/workspace/environments.mdx
index 1e93631192c03..4a8f0b6012abb 100644
--- a/docs/pages/product/workspace/environments.mdx
+++ b/docs/pages/product/workspace/environments.mdx
@@ -3,13 +3,17 @@
An environment provides access to your data model. Cube Cloud provides the following environments:
- production (default)
- staging - providing access to the data model on a specific branch.
- Each branch in the repository corresponds to a separate staging environment.
+ Each branch in the repository corresponds to a separate staging environment.
+ Changes must be committed to a branch to be viewable in this environment.
- development - providing access to the data model that you are currently working on.
The development environment is automatically created when you enter [development mode][ref-dev-mode].
- It tracks the branch you're on and is updated automatically when you make changes to the data model.
+ One development environment is allocated per user.
+ It tracks the branch you're currently on and is updated automatically when you save changes to the data model.
+ You cannot query the development environment unless your user is in dev mode on the branch you are trying to access.
Each environment provides its own set of API and SQL API endpoints.
-You can access them on the [Data Model][ref-data-model]'s Overview page and BI Integrations SQL API Connection tab.
+You can reference them on the [Data Model][ref-data-model]'s Overview page and BI Integrations SQL API Connection tab.
+To query a development environment's API endpoints, your user must be in dev mode and on the branch that has the saved changes.
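For illustration, a minimal TypeScript sketch of calling one environment's REST API endpoint over HTTP; the deployment URL is a placeholder and the `orders.count` measure is assumed — copy the real endpoint from the Data Model Overview page as the doc above describes:

```ts
// Sketch: POST a query to an environment-specific REST API endpoint.
// The URL below is a placeholder; each environment exposes its own.
const endpoint = "https://example.cubecloud.dev/cubejs-api/v1/load";
const token = process.env.CUBE_API_TOKEN!; // a JWT signed with the API secret

async function loadOrdersCount(): Promise<unknown> {
  const res = await fetch(endpoint, {
    method: "POST",
    headers: { Authorization: token, "Content-Type": "application/json" },
    // `orders.count` is an assumed measure name for this example.
    body: JSON.stringify({ query: { measures: ["orders.count"] } }),
  });
  if (!res.ok) throw new Error(`HTTP ${res.status}`);
  return res.json();
}
```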
Date: Mon, 9 Sep 2024 04:08:00 -0700
Subject: [PATCH 011/415] docs: Update dbt.mdx (#8680)
Add description from dbt manifest
---
docs/pages/guides/dbt.mdx | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/docs/pages/guides/dbt.mdx b/docs/pages/guides/dbt.mdx
index 86616de28f962..36087b2efdb35 100644
--- a/docs/pages/guides/dbt.mdx
+++ b/docs/pages/guides/dbt.mdx
@@ -263,6 +263,7 @@ cubes:
- name: "{{ column.name }}"
sql: "{{ column.sql }}"
type: "{{ column.type }}"
+ description: "{{ column.description }}"
meta:
source: dbt
{% endfor %}
@@ -361,4 +362,4 @@ of the REST API.
[link-dbt-docs-columns]: https://docs.getdbt.com/reference/resource-properties/columns
[link-dbt-materializations]: https://docs.getdbt.com/docs/build/materializations
[link-smart-open]: https://pypi.org/project/smart-open/
-[link-boto3]: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-examples.html
\ No newline at end of file
+[link-boto3]: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-examples.html
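As a rough companion to the template change above, a hedged TypeScript sketch of the same mapping — a dbt manifest column carried into a Cube dimension, now including `description`; the `DbtColumn` shape is simplified for illustration and is not the full dbt manifest schema:

```ts
// Sketch: the Jinja mapping above, expressed in TypeScript. The
// DbtColumn shape is simplified; a real dbt manifest entry carries
// more fields.
interface DbtColumn {
  name: string;
  sql: string;
  type: string;
  description?: string;
}

interface CubeDimension {
  name: string;
  sql: string;
  type: string;
  description?: string;
  meta: { source: "dbt" };
}

function toDimension(column: DbtColumn): CubeDimension {
  return {
    name: column.name,
    sql: column.sql,
    type: column.type,
    // The line this patch adds: carry the dbt description through.
    description: column.description,
    meta: { source: "dbt" },
  };
}
```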
From 863f3709e97c904f1c800ad98889dc272dbfddbd Mon Sep 17 00:00:00 2001
From: Ryan Woodring
Date: Mon, 9 Sep 2024 11:19:47 -0400
Subject: [PATCH 012/415] fix(api-gateway): fixes an issue where queries to get
the total count of results incorrectly applied the sorting from the
original query and also received the default ordering when the query
ordering was stripped out (#8060) Thanks @rdwoodring!
* fix(api-gateway) fix(schema-compiler): fixes an issue where total-count queries incorrectly applied the original query's sorting and received the default ordering when the query ordering was stripped out
* fix(api-gateway): add missing semicolon
---------
Co-authored-by: Ryan Woodring
---
packages/cubejs-api-gateway/src/gateway.ts | 4 ++++
packages/cubejs-schema-compiler/src/adapter/BaseQuery.js | 2 +-
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/packages/cubejs-api-gateway/src/gateway.ts b/packages/cubejs-api-gateway/src/gateway.ts
index 5ee0f2af0ab25..6f2d0fa4e0f26 100644
--- a/packages/cubejs-api-gateway/src/gateway.ts
+++ b/packages/cubejs-api-gateway/src/gateway.ts
@@ -1552,9 +1552,13 @@ class ApiGateway {
if (normalizedQuery.total) {
const normalizedTotal = structuredClone(normalizedQuery);
normalizedTotal.totalQuery = true;
+
+ delete normalizedTotal.order;
+
normalizedTotal.limit = null;
normalizedTotal.rowLimit = null;
normalizedTotal.offset = null;
+
const [totalQuery] = await this.getSqlQueriesInternal(
context,
[normalizedTotal],
diff --git a/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js b/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js
index 07ad2fdc528ff..9a0de0cd60cf3 100644
--- a/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js
+++ b/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js
@@ -387,7 +387,7 @@ export class BaseQuery {
}
defaultOrder() {
- if (this.options.preAggregationQuery) {
+ if (this.options.preAggregationQuery || this.options.totalQuery) {
return [];
}
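A condensed sketch of the fixed normalization above, using a simplified stand-in for the gateway's `NormalizedQuery` type — with `order` deleted here and `defaultOrder()` returning an empty list when `totalQuery` is set, the count query carries no ORDER BY at all:

```ts
// Simplified stand-in types; the real NormalizedQuery is richer.
interface NormalizedQuery {
  totalQuery?: boolean;
  order?: unknown;
  limit?: number | null;
  rowLimit?: number | null;
  offset?: number | null;
}

function makeTotalQuery(normalizedQuery: NormalizedQuery): NormalizedQuery {
  const normalizedTotal = structuredClone(normalizedQuery);
  normalizedTotal.totalQuery = true;
  // The fix: drop ordering entirely. Combined with the BaseQuery change,
  // neither the original sort nor the default order reaches the count query.
  delete normalizedTotal.order;
  normalizedTotal.limit = null;
  normalizedTotal.rowLimit = null;
  normalizedTotal.offset = null;
  return normalizedTotal;
}
```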
From ca7c292e0122be50ac7adc9b9d4910623d19f840 Mon Sep 17 00:00:00 2001
From: jlloyd-widen <82222659+jlloyd-widen@users.noreply.github.com>
Date: Mon, 9 Sep 2024 09:25:41 -0600
Subject: [PATCH 013/415] fix: Updated jsonwebtoken in all packages (#8282)
Thanks @jlloyd-widen!
* updated jsonwebtoken in all packages
* fix linting error, address other CVE
* address tsc failure
* reverted the jsdom change in playground
* fixed yarn lock file
* added the jsdom update back in
---
packages/cubejs-api-gateway/package.json | 4 +-
packages/cubejs-backend-cloud/package.json | 2 +-
packages/cubejs-cli/package.json | 4 +-
packages/cubejs-client-vue/package.json | 1 +
packages/cubejs-playground/package.json | 2 +-
packages/cubejs-server-core/package.json | 4 +-
packages/cubejs-server/package.json | 2 +-
packages/cubejs-testing-drivers/package.json | 2 +-
packages/cubejs-testing/package.json | 2 +-
packages/cubejs-testing/src/birdbox.ts | 2 +-
yarn.lock | 207 ++++++++++++++++---
11 files changed, 197 insertions(+), 35 deletions(-)
diff --git a/packages/cubejs-api-gateway/package.json b/packages/cubejs-api-gateway/package.json
index b06c058cc6ffc..dae3c9f308a03 100644
--- a/packages/cubejs-api-gateway/package.json
+++ b/packages/cubejs-api-gateway/package.json
@@ -39,7 +39,7 @@
"http-proxy-middleware": "^3.0.0",
"inflection": "^1.12.0",
"joi": "^17.8.3",
- "jsonwebtoken": "^8.3.0",
+ "jsonwebtoken": "^9.0.2",
"jwk-to-pem": "^2.0.4",
"moment": "^2.24.0",
"moment-timezone": "^0.5.27",
@@ -52,7 +52,7 @@
"@cubejs-backend/linter": "^0.35.0",
"@types/express": "^4.17.9",
"@types/jest": "^27",
- "@types/jsonwebtoken": "^8.5.0",
+ "@types/jsonwebtoken": "^9.0.2",
"@types/jwk-to-pem": "^2.0.0",
"@types/mysql": "^2.15.19",
"@types/node-fetch": "^2.5.8",
diff --git a/packages/cubejs-backend-cloud/package.json b/packages/cubejs-backend-cloud/package.json
index 41f126dd162ff..f44a59285930f 100644
--- a/packages/cubejs-backend-cloud/package.json
+++ b/packages/cubejs-backend-cloud/package.json
@@ -36,7 +36,7 @@
"chokidar": "^3.5.1",
"env-var": "^6.3.0",
"fs-extra": "^9.1.0",
- "jsonwebtoken": "^8.5.1",
+ "jsonwebtoken": "^9.0.2",
"request": "^2.88.2",
"request-promise": "^4.2.5"
},
diff --git a/packages/cubejs-cli/package.json b/packages/cubejs-cli/package.json
index 3bd4b21b1e76e..c2c1881c7c51d 100644
--- a/packages/cubejs-cli/package.json
+++ b/packages/cubejs-cli/package.json
@@ -39,7 +39,7 @@
"cross-spawn": "^7.0.1",
"fs-extra": "^8.1.0",
"inquirer": "^7.1.0",
- "jsonwebtoken": "^8.5.1",
+ "jsonwebtoken": "^9.0.2",
"request": "^2.88.2",
"request-promise": "^4.2.5",
"semver": "^7.3.2",
@@ -57,7 +57,7 @@
"@types/fs-extra": "^9.0.2",
"@types/inquirer": "^7.3.1",
"@types/jest": "^27",
- "@types/jsonwebtoken": "^8.5.0",
+ "@types/jsonwebtoken": "^9.0.2",
"@types/node": "^14",
"@types/request-promise": "^4.1.46",
"@types/semver": "^7.3.4",
diff --git a/packages/cubejs-client-vue/package.json b/packages/cubejs-client-vue/package.json
index 775378a5bc8c1..5c3f754bea4a9 100644
--- a/packages/cubejs-client-vue/package.json
+++ b/packages/cubejs-client-vue/package.json
@@ -33,6 +33,7 @@
"ramda": "^0.27.2"
},
"devDependencies": {
+ "@babel/plugin-proposal-optional-chaining": "^7.21.0",
"@vue/babel-preset-app": "^5",
"@vue/cli-plugin-babel": "^5",
"@vue/cli-plugin-eslint": "^5",
diff --git a/packages/cubejs-playground/package.json b/packages/cubejs-playground/package.json
index 1f87bf7064e81..02a110f5e2ca4 100644
--- a/packages/cubejs-playground/package.json
+++ b/packages/cubejs-playground/package.json
@@ -79,7 +79,7 @@
"eslint-plugin-react": "^7.20.0",
"fs-extra": "^8.1.0",
"graphql": "^15.8.0",
- "jsdom": "^16.7.0",
+ "jsdom": "^24.0.0",
"prismjs": "^1.25.0",
"react": "^17.0.1",
"react-dom": "^17.0.1",
diff --git a/packages/cubejs-server-core/package.json b/packages/cubejs-server-core/package.json
index b9bac813d1ef8..603192007b2e5 100644
--- a/packages/cubejs-server-core/package.json
+++ b/packages/cubejs-server-core/package.json
@@ -42,7 +42,7 @@
"fs-extra": "^8.1.0",
"is-docker": "^2.1.1",
"joi": "^17.8.3",
- "jsonwebtoken": "^8.4.0",
+ "jsonwebtoken": "^9.0.2",
"lodash.clonedeep": "^4.5.0",
"lru-cache": "^5.1.1",
"moment": "^2.29.1",
@@ -64,7 +64,7 @@
"@types/express": "^4.17.9",
"@types/fs-extra": "^9.0.8",
"@types/jest": "^27",
- "@types/jsonwebtoken": "^8.5.0",
+ "@types/jsonwebtoken": "^9.0.2",
"@types/lru-cache": "^5.1.0",
"@types/node": "^16",
"@types/node-fetch": "^2.5.7",
diff --git a/packages/cubejs-server/package.json b/packages/cubejs-server/package.json
index ce57b1e96a67c..24134a2581c38 100644
--- a/packages/cubejs-server/package.json
+++ b/packages/cubejs-server/package.json
@@ -55,7 +55,7 @@
"codesandbox-import-utils": "^2.1.12",
"cors": "^2.8.4",
"express": "^4.17.1",
- "jsonwebtoken": "^8.4.0",
+ "jsonwebtoken": "^9.0.2",
"semver": "^7.3.2",
"source-map-support": "^0.5.19",
"ws": "^7.1.2"
diff --git a/packages/cubejs-testing-drivers/package.json b/packages/cubejs-testing-drivers/package.json
index 4201ad4214a0e..6a820390a2de4 100644
--- a/packages/cubejs-testing-drivers/package.json
+++ b/packages/cubejs-testing-drivers/package.json
@@ -70,7 +70,7 @@
"dotenv": "^16.0.3",
"fs-extra": "^11.1.1",
"jest": "^27",
- "jsonwebtoken": "^8.5.1",
+ "jsonwebtoken": "^9.0.2",
"pg": "^8.7.3",
"ramda": "^0.28.0",
"testcontainers": "^10.10.4",
diff --git a/packages/cubejs-testing/package.json b/packages/cubejs-testing/package.json
index c607441e95b33..87f88198519a1 100644
--- a/packages/cubejs-testing/package.json
+++ b/packages/cubejs-testing/package.json
@@ -122,7 +122,7 @@
"eslint-plugin-cypress": "^2.12.1",
"globby": "^11.0.4",
"jest": "^27",
- "jsonwebtoken": "^8.5.1",
+ "jsonwebtoken": "^9.0.2",
"jwt-decode": "^3.1.2",
"pg": "^8.7.3",
"typescript": "~5.2.2",
diff --git a/packages/cubejs-testing/src/birdbox.ts b/packages/cubejs-testing/src/birdbox.ts
index 17954fa961d13..b357abab4dd18 100644
--- a/packages/cubejs-testing/src/birdbox.ts
+++ b/packages/cubejs-testing/src/birdbox.ts
@@ -363,7 +363,7 @@ export async function startBirdBoxFromContainer(
proxyServer.on('error', async (err, req, res: any) => {
process.stderr.write(`[Proxy Server] error: ${err}\n`);
- if (!res.headersSent) {
+ if ('headersSent' in res && !res.headersSent) {
res.writeHead(500, { 'content-type': 'application/json' });
}
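For context, a hedged sketch of the `jsonwebtoken` v9 API these packages now pin; the basic `sign`/`verify` calls are unchanged from v8, with v9 mainly tightening key validation and dropping old Node versions:

```ts
import jwt from "jsonwebtoken"; // ^9.0.2 after this patch

// Illustration only; in practice the secret comes from configuration.
const secret = "dev-secret";

// Sign a short-lived token and verify it; verify() throws on a bad
// signature or an expired token.
const token = jwt.sign({ sub: "user-1" }, secret, { expiresIn: "1h" });
const payload = jwt.verify(token, secret);
console.log(payload);
```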
diff --git a/yarn.lock b/yarn.lock
index 3baba27a6cc46..6af9573d2f781 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1730,7 +1730,7 @@
dependencies:
"@babel/types" "^7.16.0"
-"@babel/helper-skip-transparent-expression-wrappers@^7.22.5":
+"@babel/helper-skip-transparent-expression-wrappers@^7.20.0", "@babel/helper-skip-transparent-expression-wrappers@^7.22.5":
version "7.22.5"
resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz#007f15240b5751c537c40e77abb4e89eeaaa8847"
integrity sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==
@@ -2069,6 +2069,15 @@
"@babel/helper-skip-transparent-expression-wrappers" "^7.16.0"
"@babel/plugin-syntax-optional-chaining" "^7.8.3"
+"@babel/plugin-proposal-optional-chaining@^7.21.0":
+ version "7.21.0"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.21.0.tgz#886f5c8978deb7d30f678b2e24346b287234d3ea"
+ integrity sha512-p4zeefM72gpmEe2fkUr/OnOXpWEf8nAgk7ZYVqqfFiyIG7oFfVZcCrU64hWn5xp4tQ9LkV4bTIa5rD0KANpKNA==
+ dependencies:
+ "@babel/helper-plugin-utils" "^7.20.2"
+ "@babel/helper-skip-transparent-expression-wrappers" "^7.20.0"
+ "@babel/plugin-syntax-optional-chaining" "^7.8.3"
+
"@babel/plugin-proposal-private-methods@^7.14.5", "@babel/plugin-proposal-private-methods@^7.16.5":
version "7.16.5"
resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.16.5.tgz#2086f7d78c1b0c712d49b5c3fbc2d1ca21a7ee12"
@@ -8638,10 +8647,10 @@
resolved "https://registry.yarnpkg.com/@types/json5/-/json5-0.0.29.tgz#ee28707ae94e11d2b827bcbe5270bcea7f3e71ee"
integrity sha1-7ihweulOEdK4J7y+UnC86n8+ce4=
-"@types/jsonwebtoken@^8.5.0":
- version "8.5.6"
- resolved "https://registry.yarnpkg.com/@types/jsonwebtoken/-/jsonwebtoken-8.5.6.tgz#1913e5a61e70a192c5a444623da4901a7b1a9d42"
- integrity sha512-+P3O/xC7nzVizIi5VbF34YtqSonFsdnbXBnWUCYRiKOi1f9gA4sEFvXkrGr/QVV23IbMYvcoerI7nnhDUiWXRQ==
+"@types/jsonwebtoken@^9.0.2":
+ version "9.0.6"
+ resolved "https://registry.yarnpkg.com/@types/jsonwebtoken/-/jsonwebtoken-9.0.6.tgz#d1af3544d99ad992fb6681bbe60676e06b032bd3"
+ integrity sha512-/5hndP5dCjloafCXns6SZyESp3Ldq7YjH3zwzwczYnjxIT0Fqzk5ROSYVGfFyczIue7IUEj8hkvLbPoLQ18vQw==
dependencies:
"@types/node" "*"
@@ -13308,6 +13317,13 @@ cssstyle@^2.3.0:
dependencies:
cssom "~0.3.6"
+cssstyle@^4.0.1:
+ version "4.0.1"
+ resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-4.0.1.tgz#ef29c598a1e90125c870525490ea4f354db0660a"
+ integrity sha512-8ZYiJ3A/3OkDd093CBT/0UKDWry7ak4BdPTFP2+QEP7cmhouyq/Up709ASSj2cK02BbZiMgk7kYjZNS4QP5qrQ==
+ dependencies:
+ rrweb-cssom "^0.6.0"
+
csstype@^3.0.2:
version "3.0.10"
resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.0.10.tgz#2ad3a7bed70f35b965707c092e5f30b327c290e5"
@@ -13469,6 +13485,14 @@ data-urls@^2.0.0:
whatwg-mimetype "^2.3.0"
whatwg-url "^8.0.0"
+data-urls@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.yarnpkg.com/data-urls/-/data-urls-5.0.0.tgz#2f76906bce1824429ffecb6920f45a0b30f00dde"
+ integrity sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg==
+ dependencies:
+ whatwg-mimetype "^4.0.0"
+ whatwg-url "^14.0.0"
+
data-view-buffer@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/data-view-buffer/-/data-view-buffer-1.0.1.tgz#8ea6326efec17a2e42620696e671d7d5a8bc66b2"
@@ -13611,6 +13635,11 @@ decimal.js@^10.2.1:
resolved "https://registry.yarnpkg.com/decimal.js/-/decimal.js-10.3.1.tgz#d8c3a444a9c6774ba60ca6ad7261c3a94fd5e783"
integrity sha512-V0pfhfr8suzyPGOx3nmq4aHqabehUZn6Ch9kyFpV79TGDTWFmHqUqXdabR7QHqxzrYolF4+tVmJhUG4OURg5dQ==
+decimal.js@^10.4.3:
+ version "10.4.3"
+ resolved "https://registry.yarnpkg.com/decimal.js/-/decimal.js-10.4.3.tgz#1044092884d245d1b7f65725fa4ad4c6f781cc23"
+ integrity sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==
+
decode-uri-component@^0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545"
@@ -14412,6 +14441,11 @@ entities@^2.0.0:
resolved "https://registry.yarnpkg.com/entities/-/entities-2.2.0.tgz#098dc90ebb83d8dffa089d55256b351d34c4da55"
integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==
+entities@^4.4.0:
+ version "4.5.0"
+ resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48"
+ integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==
+
entities@~2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/entities/-/entities-2.1.0.tgz#992d3129cf7df6870b96c57858c249a120f8b8b5"
@@ -17138,6 +17172,13 @@ html-encoding-sniffer@^2.0.1:
dependencies:
whatwg-encoding "^1.0.5"
+html-encoding-sniffer@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz#696df529a7cfd82446369dc5193e590a3735b448"
+ integrity sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==
+ dependencies:
+ whatwg-encoding "^3.1.1"
+
html-entities@^1.3.1:
version "1.4.0"
resolved "https://registry.yarnpkg.com/html-entities/-/html-entities-1.4.0.tgz#cfbd1b01d2afaf9adca1b10ae7dffab98c71d2dc"
@@ -17384,7 +17425,7 @@ https-proxy-agent@^5.0.1:
agent-base "6"
debug "4"
-https-proxy-agent@^7.0.0:
+https-proxy-agent@^7.0.0, https-proxy-agent@^7.0.2:
version "7.0.4"
resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-7.0.4.tgz#8e97b841a029ad8ddc8731f26595bad868cb4168"
integrity sha512-wlwpilI7YdjSkWaQ/7omYBMTliDcmCN8OLihO6I9B86g06lMyAoqgoDpV0XqoaPOKj+0DIdAvnsWfyAAhmimcg==
@@ -17460,7 +17501,7 @@ iconv-lite@0.4.24, iconv-lite@^0.4.15, iconv-lite@^0.4.24, iconv-lite@^0.4.4:
dependencies:
safer-buffer ">= 2.1.2 < 3"
-iconv-lite@^0.6.2, iconv-lite@^0.6.3:
+iconv-lite@0.6.3, iconv-lite@^0.6.2, iconv-lite@^0.6.3:
version "0.6.3"
resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501"
integrity sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==
@@ -19124,7 +19165,7 @@ jsbn@~0.1.0:
resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
integrity sha1-peZUwuWi3rXyAdls77yoDA7y9RM=
-jsdom@^16.6.0, jsdom@^16.7.0:
+jsdom@^16.6.0:
version "16.7.0"
resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-16.7.0.tgz#918ae71965424b197c819f8183a754e18977b710"
integrity sha512-u9Smc2G1USStM+s/x1ru5Sxrl6mPYCbByG1U/hUmqaVsm4tbNyS7CicOSRyuGQYZhTu0h84qkZZQ/I+dzizSVw==
@@ -19157,6 +19198,33 @@ jsdom@^16.6.0, jsdom@^16.7.0:
ws "^7.4.6"
xml-name-validator "^3.0.0"
+jsdom@^24.0.0:
+ version "24.0.0"
+ resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-24.0.0.tgz#e2dc04e4c79da368481659818ee2b0cd7c39007c"
+ integrity sha512-UDS2NayCvmXSXVP6mpTj+73JnNQadZlr9N68189xib2tx5Mls7swlTNao26IoHv46BZJFvXygyRtyXd1feAk1A==
+ dependencies:
+ cssstyle "^4.0.1"
+ data-urls "^5.0.0"
+ decimal.js "^10.4.3"
+ form-data "^4.0.0"
+ html-encoding-sniffer "^4.0.0"
+ http-proxy-agent "^7.0.0"
+ https-proxy-agent "^7.0.2"
+ is-potential-custom-element-name "^1.0.1"
+ nwsapi "^2.2.7"
+ parse5 "^7.1.2"
+ rrweb-cssom "^0.6.0"
+ saxes "^6.0.0"
+ symbol-tree "^3.2.4"
+ tough-cookie "^4.1.3"
+ w3c-xmlserializer "^5.0.0"
+ webidl-conversions "^7.0.0"
+ whatwg-encoding "^3.1.1"
+ whatwg-mimetype "^4.0.0"
+ whatwg-url "^14.0.0"
+ ws "^8.16.0"
+ xml-name-validator "^5.0.0"
+
jsesc@^2.5.1:
version "2.5.2"
resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4"
@@ -19288,10 +19356,20 @@ jsonparse@^1.2.0, jsonparse@^1.3.1:
resolved "https://registry.yarnpkg.com/jsonparse/-/jsonparse-1.3.1.tgz#3f4dae4a91fac315f71062f8521cc239f1366280"
integrity sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA=
-jsonwebtoken@^8.3.0, jsonwebtoken@^8.4.0, jsonwebtoken@^8.5.1:
- version "8.5.1"
- resolved "https://registry.yarnpkg.com/jsonwebtoken/-/jsonwebtoken-8.5.1.tgz#00e71e0b8df54c2121a1f26137df2280673bcc0d"
- integrity sha512-XjwVfRS6jTMsqYs0EsuJ4LGxXV14zQybNd4L2r0UvbVnSF9Af8x7p5MzbJ90Ioz/9TI41/hTCvznF/loiSzn8w==
+jsonwebtoken@^9.0.0:
+ version "9.0.0"
+ resolved "https://registry.yarnpkg.com/jsonwebtoken/-/jsonwebtoken-9.0.0.tgz#d0faf9ba1cc3a56255fe49c0961a67e520c1926d"
+ integrity sha512-tuGfYXxkQGDPnLJ7SibiQgVgeDgfbPq2k2ICcbgqW8WxWLBAxKQM/ZCu/IT8SOSwmaYl4dpTFCW5xZv7YbbWUw==
+ dependencies:
+ jws "^3.2.2"
+ lodash "^4.17.21"
+ ms "^2.1.1"
+ semver "^7.3.8"
+
+jsonwebtoken@^9.0.2:
+ version "9.0.2"
+ resolved "https://registry.yarnpkg.com/jsonwebtoken/-/jsonwebtoken-9.0.2.tgz#65ff91f4abef1784697d40952bb1998c504caaf3"
+ integrity sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ==
dependencies:
jws "^3.2.2"
lodash.includes "^4.3.0"
@@ -19302,17 +19380,7 @@ jsonwebtoken@^8.3.0, jsonwebtoken@^8.4.0, jsonwebtoken@^8.5.1:
lodash.isstring "^4.0.1"
lodash.once "^4.0.0"
ms "^2.1.1"
- semver "^5.6.0"
-
-jsonwebtoken@^9.0.0:
- version "9.0.0"
- resolved "https://registry.yarnpkg.com/jsonwebtoken/-/jsonwebtoken-9.0.0.tgz#d0faf9ba1cc3a56255fe49c0961a67e520c1926d"
- integrity sha512-tuGfYXxkQGDPnLJ7SibiQgVgeDgfbPq2k2ICcbgqW8WxWLBAxKQM/ZCu/IT8SOSwmaYl4dpTFCW5xZv7YbbWUw==
- dependencies:
- jws "^3.2.2"
- lodash "^4.17.21"
- ms "^2.1.1"
- semver "^7.3.8"
+ semver "^7.5.4"
jsprim@^1.2.2:
version "1.4.2"
@@ -21573,6 +21641,11 @@ nwsapi@^2.2.0:
resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.2.0.tgz#204879a9e3d068ff2a55139c2c772780681a38b7"
integrity sha512-h2AatdwYH+JHiZpv7pt/gSX1XoRGb7L/qSIeuqA6GwYoF9w1vP1cw42TO0aI2pNyshRK5893hNSl+1//vHK7hQ==
+nwsapi@^2.2.7:
+ version "2.2.10"
+ resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.2.10.tgz#0b77a68e21a0b483db70b11fad055906e867cda8"
+ integrity sha512-QK0sRs7MKv0tKe1+5uZIQk/C8XGza4DAnztJG8iD+TpJIORARrCxczA738awHrZoHeTjSSoHqao2teO0dC/gFQ==
+
oauth-sign@~0.9.0:
version "0.9.0"
resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455"
@@ -22242,6 +22315,13 @@ parse5@^5.1.1:
resolved "https://registry.yarnpkg.com/parse5/-/parse5-5.1.1.tgz#f68e4e5ba1852ac2cadc00f4555fff6c2abb6178"
integrity sha512-ugq4DFI0Ptb+WWjAdOK16+u/nHfiIrcE+sh8kZMaM0WllQKLI9rOUq6c2b7cwPkXdzfQESqvoqK6ug7U/Yyzug==
+parse5@^7.1.2:
+ version "7.1.2"
+ resolved "https://registry.yarnpkg.com/parse5/-/parse5-7.1.2.tgz#0736bebbfd77793823240a23b7fc5e010b7f8e32"
+ integrity sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==
+ dependencies:
+ entities "^4.4.0"
+
parseqs@0.0.6:
version "0.0.6"
resolved "https://registry.yarnpkg.com/parseqs/-/parseqs-0.0.6.tgz#8e4bb5a19d1cdc844a08ac974d34e273afa670d5"
@@ -23783,6 +23863,11 @@ punycode@^2.1.0, punycode@^2.1.1:
resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec"
integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==
+punycode@^2.3.1:
+ version "2.3.1"
+ resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5"
+ integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==
+
pupa@^2.1.1:
version "2.1.1"
resolved "https://registry.yarnpkg.com/pupa/-/pupa-2.1.1.tgz#f5e8fd4afc2c5d97828faa523549ed8744a20d62"
@@ -25312,6 +25397,11 @@ rollup@^3.27.1:
optionalDependencies:
fsevents "~2.3.2"
+rrweb-cssom@^0.6.0:
+ version "0.6.0"
+ resolved "https://registry.yarnpkg.com/rrweb-cssom/-/rrweb-cssom-0.6.0.tgz#ed298055b97cbddcdeb278f904857629dec5e0e1"
+ integrity sha512-APM0Gt1KoXBz0iIkkdB/kfvGOwC4UuJFeG/c+yV7wSc7q96cG/kJ0HiYCnzivD9SB53cLV1MlHFNfOuPaadYSw==
+
run-async@^2.4.0:
version "2.4.1"
resolved "https://registry.yarnpkg.com/run-async/-/run-async-2.4.1.tgz#8440eccf99ea3e70bd409d49aab88e10c189a455"
@@ -25447,6 +25537,13 @@ saxes@^5.0.1:
dependencies:
xmlchars "^2.2.0"
+saxes@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.yarnpkg.com/saxes/-/saxes-6.0.0.tgz#fe5b4a4768df4f14a201b1ba6a65c1f3d9988cc5"
+ integrity sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==
+ dependencies:
+ xmlchars "^2.2.0"
+
scheduler@^0.20.2:
version "0.20.2"
resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.20.2.tgz#4baee39436e34aa93b4874bddcbf0fe8b8b50e91"
@@ -27594,6 +27691,16 @@ tough-cookie@^4.0.0:
punycode "^2.1.1"
universalify "^0.1.2"
+tough-cookie@^4.1.3:
+ version "4.1.4"
+ resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-4.1.4.tgz#945f1461b45b5a8c76821c33ea49c3ac192c1b36"
+ integrity sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==
+ dependencies:
+ psl "^1.1.33"
+ punycode "^2.1.1"
+ universalify "^0.2.0"
+ url-parse "^1.5.3"
+
tr46@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/tr46/-/tr46-2.1.0.tgz#fa87aa81ca5d5941da8cbf1f9b749dc969a4e240"
@@ -27601,6 +27708,13 @@ tr46@^2.1.0:
dependencies:
punycode "^2.1.1"
+tr46@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.yarnpkg.com/tr46/-/tr46-5.0.0.tgz#3b46d583613ec7283020d79019f1335723801cec"
+ integrity sha512-tk2G5R2KRwBd+ZN0zaEXpmzdKyOYksXwywulIX95MBODjSzMIuQnQ3m8JxgbhnL1LeVo7lqQKsYa1O3Htl7K5g==
+ dependencies:
+ punycode "^2.3.1"
+
tr46@~0.0.3:
version "0.0.3"
resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a"
@@ -28039,6 +28153,11 @@ universalify@^0.1.0, universalify@^0.1.2:
resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66"
integrity sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==
+universalify@^0.2.0:
+ version "0.2.0"
+ resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.2.0.tgz#6451760566fa857534745ab1dde952d1b1761be0"
+ integrity sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==
+
universalify@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.0.tgz#75a4984efedc4b08975c5aeb73f530d02df25717"
@@ -28474,6 +28593,13 @@ w3c-xmlserializer@^2.0.0:
dependencies:
xml-name-validator "^3.0.0"
+w3c-xmlserializer@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.yarnpkg.com/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz#f925ba26855158594d907313cedd1476c5967f6c"
+ integrity sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==
+ dependencies:
+ xml-name-validator "^5.0.0"
+
walker@^1.0.7:
version "1.0.8"
resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.8.tgz#bd498db477afe573dc04185f011d3ab8a8d7653f"
@@ -28533,6 +28659,11 @@ webidl-conversions@^6.1.0:
resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-6.1.0.tgz#9111b4d7ea80acd40f5270d666621afa78b69514"
integrity sha512-qBIvFLGiBpLjfwmYAaHPXsn+ho5xZnGvyGvsarywGNc8VyQJUMHJ8OBKGGrPER0okBeMDaan4mNBlgBROxuI8w==
+webidl-conversions@^7.0.0:
+ version "7.0.0"
+ resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-7.0.0.tgz#256b4e1882be7debbf01d05f0aa2039778ea080a"
+ integrity sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==
+
webpack-bundle-analyzer@^4.4.0:
version "4.10.0"
resolved "https://registry.yarnpkg.com/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.0.tgz#eecb0ade9bd1944d3d2e38262ec9793da6f13e69"
@@ -28805,6 +28936,13 @@ whatwg-encoding@^1.0.5:
dependencies:
iconv-lite "0.4.24"
+whatwg-encoding@^3.1.1:
+ version "3.1.1"
+ resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz#d0f4ef769905d426e1688f3e34381a99b60b76e5"
+ integrity sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==
+ dependencies:
+ iconv-lite "0.6.3"
+
whatwg-fetch@>=0.10.0, whatwg-fetch@^3.0.0, whatwg-fetch@^3.6.2:
version "3.6.19"
resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-3.6.19.tgz#caefd92ae630b91c07345537e67f8354db470973"
@@ -28815,6 +28953,19 @@ whatwg-mimetype@^2.3.0:
resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz#3d4b1e0312d2079879f826aff18dbeeca5960fbf"
integrity sha512-M4yMwr6mAnQz76TbJm914+gPpB/nCwvZbJU28cUD6dR004SAxDLOOSUaB1JDRqLtaOV/vi0IC5lEAGFgrjGv/g==
+whatwg-mimetype@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz#bc1bf94a985dc50388d54a9258ac405c3ca2fc0a"
+ integrity sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==
+
+whatwg-url@^14.0.0:
+ version "14.0.0"
+ resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-14.0.0.tgz#00baaa7fd198744910c4b1ef68378f2200e4ceb6"
+ integrity sha512-1lfMEm2IEr7RIV+f4lUNPOqfFL+pO+Xw3fJSqmjX9AbXcXcYOkCe1P6+9VBZB6n94af16NfZf+sSk0JCBZC9aw==
+ dependencies:
+ tr46 "^5.0.0"
+ webidl-conversions "^7.0.0"
+
whatwg-url@^5.0.0:
version "5.0.0"
resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d"
@@ -29126,6 +29277,11 @@ ws@^8.13.0:
resolved "https://registry.yarnpkg.com/ws/-/ws-8.14.2.tgz#6c249a806eb2db7a20d26d51e7709eab7b2e6c7f"
integrity sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g==
+ws@^8.16.0:
+ version "8.17.0"
+ resolved "https://registry.yarnpkg.com/ws/-/ws-8.17.0.tgz#d145d18eca2ed25aaf791a183903f7be5e295fea"
+ integrity sha512-uJq6108EgZMAl20KagGkzCKfMEjxmKvZHG7Tlq0Z6nOky7YF7aq4mOx6xK8TJ/i1LeK4Qus7INktacctDgY8Ow==
+
ws@~0.4.32:
version "0.4.32"
resolved "https://registry.yarnpkg.com/ws/-/ws-0.4.32.tgz#787a6154414f3c99ed83c5772153b20feb0cec32"
@@ -29161,6 +29317,11 @@ xml-name-validator@^3.0.0:
resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-3.0.0.tgz#6ae73e06de4d8c6e47f9fb181f78d648ad457c6a"
integrity sha512-A5CUptxDsvxKJEU3yO6DuWBSJz/qizqzJKOMIfUJHETbBw/sFaDxgd6fxm1ewUaM0jZ444Fc5vC5ROYurg/4Pw==
+xml-name-validator@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-5.0.0.tgz#82be9b957f7afdacf961e5980f1bf227c0bf7673"
+ integrity sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==
+
xml-parse-from-string@^1.0.0:
version "1.0.1"
resolved "https://registry.yarnpkg.com/xml-parse-from-string/-/xml-parse-from-string-1.0.1.tgz#a9029e929d3dbcded169f3c6e28238d95a5d5a28"
From 7a7c8cb4e812bc4289a89fd2122d961d2b952b5f Mon Sep 17 00:00:00 2001
From: Sam Hughes
Date: Mon, 9 Sep 2024 11:18:09 -0700
Subject: [PATCH 014/415] refactor(cubestore): Create
CubestoreMetadataCacheFactory and TableExtensionService DIServices (#8625)
---
rust/cubestore/Cargo.lock | 137 +++++++++++++++++-
rust/cubestore/cubestore/src/config/mod.rs | 56 ++++++-
rust/cubestore/cubestore/src/metastore/mod.rs | 15 ++
.../cubestore/src/metastore/table.rs | 10 +-
.../queryplanner/info_schema/system_tables.rs | 9 ++
.../cubestore/src/queryplanner/mod.rs | 6 +
.../cubestore/src/queryplanner/planning.rs | 3 +
.../src/queryplanner/query_executor.rs | 8 +-
.../cubestore/src/queryplanner/test_utils.rs | 1 +
rust/cubestore/cubestore/src/sql/mod.rs | 13 ++
.../cubestore/src/sql/table_creator.rs | 37 +++++
.../cubestore/src/store/compaction.rs | 63 ++++++--
rust/cubestore/cubestore/src/store/mod.rs | 22 ++-
.../cubestore/src/streaming/kafka.rs | 4 +-
.../src/streaming/kafka_post_processing.rs | 21 ++-
rust/cubestore/cubestore/src/streaming/mod.rs | 5 +
rust/cubestore/cubestore/src/table/parquet.rs | 79 ++++++++--
17 files changed, 448 insertions(+), 41 deletions(-)
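The config changes below register `CubestoreMetadataCacheFactory` once and have each consumer resolve it and reuse its `cache_factory()`. As a loose, hedged illustration of that registry pattern — names invented here, not the Rust injector's actual API — a small TypeScript sketch:

```ts
// Toy async service registry: register a factory-bearing service once,
// let consumers resolve it and share the instance. Illustrative only.
type Factory<T> = () => Promise<T>;

class Injector {
  private factories = new Map<string, Factory<unknown>>();
  private instances = new Map<string, unknown>();

  register<T>(name: string, factory: Factory<T>): void {
    this.factories.set(name, factory);
  }

  async get<T>(name: string): Promise<T> {
    if (!this.instances.has(name)) {
      const factory = this.factories.get(name);
      if (!factory) throw new Error(`service not registered: ${name}`);
      this.instances.set(name, await factory());
    }
    return this.instances.get(name) as T;
  }
}

// Usage: one shared metadata-cache factory, many consumers.
interface MetadataCacheFactory { makeNoopCache(): object }

const injector = new Injector();
injector.register<MetadataCacheFactory>("metadataCacheFactory", async () => ({
  makeNoopCache: () => ({}),
}));

async function buildCompactionService() {
  const f = await injector.get<MetadataCacheFactory>("metadataCacheFactory");
  return { cache: f.makeNoopCache() };
}
```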
diff --git a/rust/cubestore/Cargo.lock b/rust/cubestore/Cargo.lock
index 6c1adcd965db7..0e31cc3d251af 100644
--- a/rust/cubestore/Cargo.lock
+++ b/rust/cubestore/Cargo.lock
@@ -54,6 +54,41 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234"
+[[package]]
+name = "aead"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"
+dependencies = [
+ "crypto-common",
+ "generic-array 0.14.4",
+]
+
+[[package]]
+name = "aes"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
+dependencies = [
+ "cfg-if 1.0.0",
+ "cipher",
+ "cpufeatures 0.2.5",
+]
+
+[[package]]
+name = "aes-gcm"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1"
+dependencies = [
+ "aead",
+ "aes",
+ "cipher",
+ "ctr",
+ "ghash",
+ "subtle",
+]
+
[[package]]
name = "ahash"
version = "0.7.4"
@@ -125,7 +160,7 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
[[package]]
name = "arrow"
version = "5.0.0"
-source = "git+https://github.com/cube-js/arrow-rs?branch=cube#ba5455c2b7bf693f67cab4a7616e3ce41fd97e8c"
+source = "git+https://github.com/cube-js/arrow-rs.git?branch=cube#9d6173caa1756600981f245c43197bb4d52dcac7"
dependencies = [
"bitflags 1.3.2",
"chrono",
@@ -667,6 +702,16 @@ dependencies = [
"half",
]
+[[package]]
+name = "cipher"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
+dependencies = [
+ "crypto-common",
+ "inout",
+]
+
[[package]]
name = "clang-sys"
version = "1.7.0"
@@ -1017,6 +1062,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
dependencies = [
"generic-array 0.14.4",
+ "rand_core 0.6.3",
"typenum",
]
@@ -1052,6 +1098,15 @@ dependencies = [
"syn 1.0.107",
]
+[[package]]
+name = "ctr"
+version = "0.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835"
+dependencies = [
+ "cipher",
+]
+
[[package]]
name = "cubedatasketches"
version = "0.1.0"
@@ -1146,7 +1201,7 @@ dependencies = [
"msql-srv",
"nanoid",
"num 0.3.1",
- "parquet-format",
+ "parquet-format 2.6.1",
"parse-size",
"paste",
"pin-project",
@@ -1269,7 +1324,7 @@ checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308"
[[package]]
name = "datafusion"
version = "4.0.0-SNAPSHOT"
-source = "git+https://github.com/cube-js/arrow-datafusion?branch=cube#11027d53f93c550d86e32ebf75e3a54cef6c8546"
+source = "git+https://github.com/cube-js/arrow-datafusion?branch=cube#faf7acb5a3f3d4976711f6faf76c7750b22b0eda"
dependencies = [
"ahash",
"arrow",
@@ -1841,6 +1896,16 @@ dependencies = [
"wasi 0.11.0+wasi-snapshot-preview1",
]
+[[package]]
+name = "ghash"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1"
+dependencies = [
+ "opaque-debug 0.3.0",
+ "polyval",
+]
+
[[package]]
name = "gimli"
version = "0.25.0"
@@ -2211,6 +2276,15 @@ dependencies = [
"unindent",
]
+[[package]]
+name = "inout"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"
+dependencies = [
+ "generic-array 0.14.4",
+]
+
[[package]]
name = "instant"
version = "0.1.10"
@@ -2337,6 +2411,15 @@ dependencies = [
"simple_asn1",
]
+[[package]]
+name = "keccak"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654"
+dependencies = [
+ "cpufeatures 0.2.5",
+]
+
[[package]]
name = "kernel32-sys"
version = "0.2.2"
@@ -3238,8 +3321,9 @@ dependencies = [
[[package]]
name = "parquet"
version = "5.0.0"
-source = "git+https://github.com/cube-js/arrow-rs?branch=cube#ba5455c2b7bf693f67cab4a7616e3ce41fd97e8c"
+source = "git+https://github.com/cube-js/arrow-rs.git?branch=cube#9d6173caa1756600981f245c43197bb4d52dcac7"
dependencies = [
+ "aes-gcm",
"arrow",
"base64 0.13.0",
"brotli",
@@ -3248,8 +3332,10 @@ dependencies = [
"flate2",
"lz4",
"num-bigint 0.4.3",
- "parquet-format",
+ "parquet-format 4.0.0",
"rand 0.8.4",
+ "serde",
+ "sha3",
"snap",
"thrift",
"zstd",
@@ -3264,6 +3350,15 @@ dependencies = [
"thrift",
]
+[[package]]
+name = "parquet-format"
+version = "4.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f0c06cdcd5460967c485f9c40a821746f5955ad81990533c7fae95dbd9bc0b5"
+dependencies = [
+ "thrift",
+]
+
[[package]]
name = "parse-size"
version = "1.0.0"
@@ -3431,6 +3526,18 @@ dependencies = [
"winapi 0.3.9",
]
+[[package]]
+name = "polyval"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25"
+dependencies = [
+ "cfg-if 1.0.0",
+ "cpufeatures 0.2.5",
+ "opaque-debug 0.3.0",
+ "universal-hash",
+]
+
[[package]]
name = "powerfmt"
version = "0.2.0"
@@ -4438,6 +4545,16 @@ dependencies = [
"digest 0.10.7",
]
+[[package]]
+name = "sha3"
+version = "0.10.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60"
+dependencies = [
+ "digest 0.10.7",
+ "keccak",
+]
+
[[package]]
name = "shared_child"
version = "1.0.0"
@@ -5236,6 +5353,16 @@ version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "514672a55d7380da379785a4d70ca8386c8883ff7eaae877be4d2081cebe73d8"
+[[package]]
+name = "universal-hash"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea"
+dependencies = [
+ "crypto-common",
+ "subtle",
+]
+
[[package]]
name = "untrusted"
version = "0.7.1"
diff --git a/rust/cubestore/cubestore/src/config/mod.rs b/rust/cubestore/cubestore/src/config/mod.rs
index 7e4597a5a0e49..ed9d87caff2df 100644
--- a/rust/cubestore/cubestore/src/config/mod.rs
+++ b/rust/cubestore/cubestore/src/config/mod.rs
@@ -32,11 +32,15 @@ use crate::remotefs::{LocalDirRemoteFs, RemoteFs};
use crate::scheduler::SchedulerImpl;
use crate::sql::cache::SqlResultCache;
use crate::sql::{SqlService, SqlServiceImpl};
+use crate::sql::{TableExtensionService, TableExtensionServiceImpl};
use crate::store::compaction::{CompactionService, CompactionServiceImpl};
use crate::store::{ChunkDataStore, ChunkStore, WALDataStore, WALStore};
use crate::streaming::kafka::{KafkaClientService, KafkaClientServiceImpl};
use crate::streaming::{KsqlClient, KsqlClientImpl, StreamingService, StreamingServiceImpl};
-use crate::table::parquet::{CubestoreParquetMetadataCache, CubestoreParquetMetadataCacheImpl};
+use crate::table::parquet::{
+ CubestoreMetadataCacheFactory, CubestoreMetadataCacheFactoryImpl,
+ CubestoreParquetMetadataCache, CubestoreParquetMetadataCacheImpl,
+};
use crate::telemetry::tracing::{TracingHelper, TracingHelperImpl};
use crate::telemetry::{
start_agent_event_loop, start_track_event_loop, stop_agent_event_loop, stop_track_event_loop,
@@ -45,7 +49,7 @@ use crate::util::memory::{MemoryHandler, MemoryHandlerImpl};
use crate::CubeError;
use cuberockstore::rocksdb::{Options, DB};
use datafusion::cube_ext;
-use datafusion::physical_plan::parquet::{LruParquetMetadataCache, NoopParquetMetadataCache};
+use datafusion::physical_plan::parquet::BasicMetadataCacheFactory;
use futures::future::join_all;
use log::Level;
use log::{debug, error};
@@ -535,7 +539,6 @@ pub trait ConfigObj: DIService {
fn remote_files_cleanup_interval_secs(&self) -> u64;
fn local_files_cleanup_size_threshold(&self) -> u64;
-
fn local_files_cleanup_delay_secs(&self) -> u64;
fn remote_files_cleanup_delay_secs(&self) -> u64;
@@ -2002,11 +2005,16 @@ impl Config {
self.injector
.register_typed::(async move |i| {
+ let metadata_cache_factory = i
+ .get_service_typed::<dyn CubestoreMetadataCacheFactory>()
+ .await
+ .cache_factory();
ChunkStore::new(
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
+ metadata_cache_factory,
i.get_service_typed::<dyn ConfigObj>()
.await
.wal_split_threshold() as usize,
@@ -2017,10 +2025,14 @@ impl Config {
self.injector
.register_typed::(async move |i| {
let c = i.get_service_typed::<dyn ConfigObj>().await;
+ let metadata_cache_factory = i
+ .get_service_typed::<dyn CubestoreMetadataCacheFactory>()
+ .await
+ .cache_factory();
CubestoreParquetMetadataCacheImpl::new(
match c.metadata_cache_max_capacity_bytes() {
- 0 => NoopParquetMetadataCache::new(),
- max_cached_metadata => LruParquetMetadataCache::new(
+ 0 => metadata_cache_factory.make_noop_cache(),
+ max_cached_metadata => metadata_cache_factory.make_lru_cache(
max_cached_metadata,
Duration::from_secs(c.metadata_cache_time_to_idle_secs()),
),
@@ -2031,11 +2043,16 @@ impl Config {
self.injector
.register_typed::(async move |i| {
+ let metadata_cache_factory = i
+ .get_service_typed::<dyn CubestoreMetadataCacheFactory>()
+ .await
+ .cache_factory();
CompactionServiceImpl::new(
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
+ metadata_cache_factory,
)
})
.await;
@@ -2060,6 +2077,12 @@ impl Config {
})
.await;
+ self.injector
+ .register_typed::(async move |_| {
+ TableExtensionServiceImpl::new()
+ })
+ .await;
+
self.injector
.register_typed::(async move |i| {
StreamingServiceImpl::new(
@@ -2068,6 +2091,9 @@ impl Config {
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
+ i.get_service_typed::<dyn CubestoreMetadataCacheFactory>()
+ .await
+ .cache_factory(),
)
})
.await;
@@ -2136,6 +2162,12 @@ impl Config {
}
pub async fn configure_common(&self) {
+ self.injector
+ .register_typed::(async move |_| {
+ CubestoreMetadataCacheFactoryImpl::new(Arc::new(BasicMetadataCacheFactory::new()))
+ })
+ .await;
+
self.injector
.register_typed_with_default::(async move |i| {
QueueRemoteFs::new(
@@ -2160,18 +2192,29 @@ impl Config {
let query_cache_to_move = query_cache.clone();
self.injector
.register_typed::(async move |i| {
+ let metadata_cache_factory = i
+ .get_service_typed::<dyn CubestoreMetadataCacheFactory>()
+ .await
+ .cache_factory();
QueryPlannerImpl::new(
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
query_cache_to_move,
+ metadata_cache_factory,
)
})
.await;
self.injector
.register_typed_with_default::(async move |i| {
- QueryExecutorImpl::new(i.get_service_typed().await, i.get_service_typed().await)
+ QueryExecutorImpl::new(
+ i.get_service_typed::()
+ .await
+ .cache_factory(),
+ i.get_service_typed().await,
+ i.get_service_typed().await,
+ )
})
.await;
@@ -2210,6 +2253,7 @@ impl Config {
i.get_service_typed().await,
i.get_service_typed().await,
i.get_service_typed().await,
+ i.get_service_typed().await,
c.wal_split_threshold() as usize,
Duration::from_secs(c.query_timeout()),
Duration::from_secs(c.import_job_timeout() * 2),
diff --git a/rust/cubestore/cubestore/src/metastore/mod.rs b/rust/cubestore/cubestore/src/metastore/mod.rs
index 7e115c465a55b..30d300914493d 100644
--- a/rust/cubestore/cubestore/src/metastore/mod.rs
+++ b/rust/cubestore/cubestore/src/metastore/mod.rs
@@ -856,6 +856,7 @@ pub trait MetaStore: DIService + Send + Sync {
partition_split_threshold: Option<u64>,
trace_obj: Option<String>,
drop_if_exists: bool,
+ extension: Option<String>,
) -> Result<IdRow<Table>, CubeError>;
async fn table_ready(&self, id: u64, is_ready: bool) -> Result<IdRow<Table>, CubeError>;
async fn seal_table(&self, id: u64) -> Result<IdRow<Table>, CubeError>;
@@ -2087,6 +2088,7 @@ impl MetaStore for RocksMetaStore {
partition_split_threshold: Option<u64>,
trace_obj: Option<String>,
drop_if_exists: bool,
+ extension: Option<String>,
) -> Result<IdRow<Table>, CubeError> {
self.write_operation(move |db_ref, batch_pipe| {
batch_pipe.invalidate_tables_cache();
@@ -2189,6 +2191,7 @@ impl MetaStore for RocksMetaStore {
aggregate_column_indices,
seq_column_index,
partition_split_threshold,
+ extension,
);
let table_id = rocks_table.insert(table, batch_pipe)?;
@@ -5143,6 +5146,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -5166,6 +5170,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -5290,6 +5295,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -5315,6 +5321,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.is_err());
@@ -5406,6 +5413,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -5498,6 +5506,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -5571,6 +5580,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.is_err());
@@ -5594,6 +5604,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.is_err());
@@ -5620,6 +5631,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.is_err());
@@ -6104,6 +6116,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -6326,6 +6339,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -6467,6 +6481,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
diff --git a/rust/cubestore/cubestore/src/metastore/table.rs b/rust/cubestore/cubestore/src/metastore/table.rs
index ad0b1709dc5a5..c0e464fadca87 100644
--- a/rust/cubestore/cubestore/src/metastore/table.rs
+++ b/rust/cubestore/cubestore/src/metastore/table.rs
@@ -153,7 +153,9 @@ pub struct Table {
#[serde(default)]
location_download_sizes: Option<Vec<u64>>,
#[serde(default)]
- partition_split_threshold: Option<u64>
+ partition_split_threshold: Option<u64>,
+ #[serde(default)]
+ extension: Option<String> // TODO: Make this an Option or Option? We have some problems implementing Hash.
}
}
@@ -190,6 +192,7 @@ impl Table {
aggregate_column_indices: Vec<u64>,
seq_column_index: Option<u64>,
partition_split_threshold: Option<u64>,
+ extension: Option<String>,
) -> Table {
let location_download_sizes = locations.as_ref().map(|locations| vec![0; locations.len()]);
Table {
@@ -212,6 +215,7 @@ impl Table {
seq_column_index,
location_download_sizes,
partition_split_threshold,
+ extension,
}
}
pub fn get_columns(&self) -> &Vec<Column> {
@@ -312,6 +316,10 @@ impl Table {
&self.select_statement
}
+ pub fn extension(&self) -> &Option<String> {
+ &self.extension
+ }
+
pub fn source_columns(&self) -> &Option<Vec<Column>> {
&self.source_columns
}
diff --git a/rust/cubestore/cubestore/src/queryplanner/info_schema/system_tables.rs b/rust/cubestore/cubestore/src/queryplanner/info_schema/system_tables.rs
index 6fb259c8957c2..55060cb065add 100644
--- a/rust/cubestore/cubestore/src/queryplanner/info_schema/system_tables.rs
+++ b/rust/cubestore/cubestore/src/queryplanner/info_schema/system_tables.rs
@@ -54,6 +54,7 @@ impl InfoSchemaTableDef for SystemTablesTableDef {
),
Field::new("sealed", DataType::Boolean, false),
Field::new("select_statement", DataType::Utf8, false),
+ Field::new("extension", DataType::Utf8, true),
]
}
@@ -247,6 +248,14 @@ impl InfoSchemaTableDef for SystemTablesTableDef {
.collect::<Vec<_>>(),
))
}),
+ Box::new(|tables| {
+ Arc::new(StringArray::from(
+ tables
+ .iter()
+ .map(|row| row.table.get_row().extension().as_ref().map(|t| t.as_str()))
+ .collect::<Vec<_>>(),
+ ))
+ }),
]
}
}
diff --git a/rust/cubestore/cubestore/src/queryplanner/mod.rs b/rust/cubestore/cubestore/src/queryplanner/mod.rs
index b661ab0393ab2..a18e6edf75d09 100644
--- a/rust/cubestore/cubestore/src/queryplanner/mod.rs
+++ b/rust/cubestore/cubestore/src/queryplanner/mod.rs
@@ -3,6 +3,7 @@ mod optimizations;
pub mod panic;
mod partition_filter;
mod planning;
+use datafusion::physical_plan::parquet::MetadataCacheFactory;
pub use planning::PlanningMeta;
mod check_memory;
pub mod physical_plan_flags;
@@ -98,6 +99,7 @@ pub struct QueryPlannerImpl {
cache_store: Arc<dyn CacheStore>,
config: Arc<dyn ConfigObj>,
cache: Arc<SqlResultCache>,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
}
crate::di_service!(QueryPlannerImpl, [QueryPlanner]);
@@ -179,12 +181,14 @@ impl QueryPlannerImpl {
cache_store: Arc<dyn CacheStore>,
config: Arc<dyn ConfigObj>,
cache: Arc<SqlResultCache>,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
) -> Arc<QueryPlannerImpl> {
Arc::new(QueryPlannerImpl {
meta_store,
cache_store,
config,
cache,
+ metadata_cache_factory,
})
}
}
@@ -193,6 +197,7 @@ impl QueryPlannerImpl {
async fn execution_context(&self) -> Result<Arc<ExecutionContext>, CubeError> {
Ok(Arc::new(ExecutionContext::with_config(
ExecutionConfig::new()
+ .with_metadata_cache_factory(self.metadata_cache_factory.clone())
.add_optimizer_rule(Arc::new(MaterializeNow {}))
.add_optimizer_rule(Arc::new(FlattenUnion {})),
)))
@@ -294,6 +299,7 @@ impl ContextProvider for MetaStoreSchemaProvider {
Vec::new(),
None,
None,
+ None,
),
),
schema: Arc::new(IdRow::new(0, metastore::Schema::new(schema.to_string()))),
diff --git a/rust/cubestore/cubestore/src/queryplanner/planning.rs b/rust/cubestore/cubestore/src/queryplanner/planning.rs
index 2efcb66ea60b1..a35b96837115f 100644
--- a/rust/cubestore/cubestore/src/queryplanner/planning.rs
+++ b/rust/cubestore/cubestore/src/queryplanner/planning.rs
@@ -2142,6 +2142,7 @@ pub mod tests {
Vec::new(),
None,
None,
+ None,
));
i.indices.push(
Index::try_new(
@@ -2193,6 +2194,7 @@ pub mod tests {
Vec::new(),
None,
None,
+ None,
));
i.indices.push(
@@ -2250,6 +2252,7 @@ pub mod tests {
Vec::new(),
None,
None,
+ None,
));
i
diff --git a/rust/cubestore/cubestore/src/queryplanner/query_executor.rs b/rust/cubestore/cubestore/src/queryplanner/query_executor.rs
index c58dc44971468..4bf2755c49add 100644
--- a/rust/cubestore/cubestore/src/queryplanner/query_executor.rs
+++ b/rust/cubestore/cubestore/src/queryplanner/query_executor.rs
@@ -44,7 +44,7 @@ use datafusion::physical_plan::memory::MemoryExec;
use datafusion::physical_plan::merge::MergeExec;
use datafusion::physical_plan::merge_sort::{LastRowByUniqueKeyExec, MergeSortExec};
use datafusion::physical_plan::parquet::{
- NoopParquetMetadataCache, ParquetExec, ParquetMetadataCache,
+ MetadataCacheFactory, NoopParquetMetadataCache, ParquetExec, ParquetMetadataCache,
};
use datafusion::physical_plan::projection::ProjectionExec;
use datafusion::physical_plan::{
@@ -105,6 +105,8 @@ pub trait QueryExecutor: DIService + Send + Sync {
crate::di_service!(MockQueryExecutor, [QueryExecutor]);
pub struct QueryExecutorImpl {
+ // TODO: Why do we need a MetadataCacheFactory when we have a ParquetMetadataCache?
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
parquet_metadata_cache: Arc<dyn CubestoreParquetMetadataCache>,
memory_handler: Arc<dyn MemoryHandler>,
}
@@ -312,10 +314,12 @@ impl QueryExecutor for QueryExecutorImpl {
impl QueryExecutorImpl {
pub fn new(
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
parquet_metadata_cache: Arc<dyn CubestoreParquetMetadataCache>,
memory_handler: Arc<dyn MemoryHandler>,
) -> Arc<QueryExecutorImpl> {
Arc::new(QueryExecutorImpl {
+ metadata_cache_factory,
parquet_metadata_cache,
memory_handler,
})
@@ -328,6 +332,7 @@ impl QueryExecutorImpl {
) -> Result<Arc<ExecutionContext>, CubeError> {
Ok(Arc::new(ExecutionContext::with_config(
ExecutionConfig::new()
+ .with_metadata_cache_factory(self.metadata_cache_factory.clone())
.with_batch_size(4096)
.with_concurrency(1)
.with_query_planner(Arc::new(CubeQueryPlanner::new_on_router(
@@ -345,6 +350,7 @@ impl QueryExecutorImpl {
) -> Result<Arc<ExecutionContext>, CubeError> {
Ok(Arc::new(ExecutionContext::with_config(
ExecutionConfig::new()
+ .with_metadata_cache_factory(self.metadata_cache_factory.clone())
.with_batch_size(4096)
.with_concurrency(1)
.with_query_planner(Arc::new(CubeQueryPlanner::new_on_worker(
diff --git a/rust/cubestore/cubestore/src/queryplanner/test_utils.rs b/rust/cubestore/cubestore/src/queryplanner/test_utils.rs
index d5e1c891fb688..f23219aeec260 100644
--- a/rust/cubestore/cubestore/src/queryplanner/test_utils.rs
+++ b/rust/cubestore/cubestore/src/queryplanner/test_utils.rs
@@ -105,6 +105,7 @@ impl MetaStore for MetaStoreMock {
_partition_split_threshold: Option<u64>,
_trace_obj: Option<String>,
_drop_if_exists: bool,
+ _extension: Option<String>,
) -> Result<IdRow<Table>, CubeError> {
panic!("MetaStore mock!")
}
diff --git a/rust/cubestore/cubestore/src/sql/mod.rs b/rust/cubestore/cubestore/src/sql/mod.rs
index 90d382c1b277b..8c53ccb6d9bb6 100644
--- a/rust/cubestore/cubestore/src/sql/mod.rs
+++ b/rust/cubestore/cubestore/src/sql/mod.rs
@@ -80,6 +80,7 @@ use crate::sql::cachestore::CacheStoreSqlService;
use crate::util::metrics;
use mockall::automock;
use table_creator::{convert_columns_type, TableCreator};
+pub use table_creator::{TableExtensionService, TableExtensionServiceImpl};
#[automock]
#[async_trait]
@@ -187,6 +188,7 @@ impl SqlServiceImpl {
query_executor: Arc<dyn QueryExecutor>,
cluster: Arc<dyn Cluster>,
import_service: Arc<dyn ImportService>,
+ table_extension_service: Arc<dyn TableExtensionService>,
config_obj: Arc<dyn ConfigObj>,
remote_fs: Arc<dyn RemoteFs>,
rows_per_chunk: usize,
@@ -205,6 +207,7 @@ impl SqlServiceImpl {
db.clone(),
cluster.clone(),
import_service,
+ table_extension_service,
config_obj.clone(),
create_table_timeout,
cache.clone(),
@@ -1659,11 +1662,13 @@ mod tests {
use crate::store::compaction::CompactionService;
use async_compression::tokio::write::GzipEncoder;
use cuberockstore::rocksdb::{Options, DB};
+ use datafusion::physical_plan::parquet::BasicMetadataCacheFactory;
use futures_timer::Delay;
use itertools::Itertools;
use pretty_assertions::assert_eq;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
+ use table_creator::TableExtensionServiceImpl;
use tokio::io::{AsyncWriteExt, BufWriter};
use uuid::Uuid;
@@ -1723,6 +1728,7 @@ mod tests {
remote_fs.clone(),
Arc::new(MockCluster::new()),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
rows_per_chunk,
);
let limits = Arc::new(ConcurrencyLimits::new(4));
@@ -1735,6 +1741,7 @@ mod tests {
Arc::new(MockQueryExecutor::new()),
Arc::new(MockCluster::new()),
Arc::new(MockImportService::new()),
+ TableExtensionServiceImpl::new(),
config.config_obj(),
remote_fs.clone(),
rows_per_chunk,
@@ -1800,6 +1807,7 @@ mod tests {
remote_fs.clone(),
Arc::new(MockCluster::new()),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
rows_per_chunk,
);
let limits = Arc::new(ConcurrencyLimits::new(4));
@@ -1812,6 +1820,7 @@ mod tests {
Arc::new(MockQueryExecutor::new()),
Arc::new(MockCluster::new()),
Arc::new(MockImportService::new()),
+ TableExtensionServiceImpl::new(),
config.config_obj(),
remote_fs.clone(),
rows_per_chunk,
@@ -1861,6 +1870,7 @@ mod tests {
TableValue::String("NULL".to_string()),
TableValue::String("NULL".to_string()),
TableValue::String("NULL".to_string()),
+ TableValue::String("NULL".to_string()),
]));
}
@@ -1907,6 +1917,7 @@ mod tests {
remote_fs.clone(),
Arc::new(MockCluster::new()),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
rows_per_chunk,
);
let limits = Arc::new(ConcurrencyLimits::new(4));
@@ -1919,6 +1930,7 @@ mod tests {
Arc::new(MockQueryExecutor::new()),
Arc::new(MockCluster::new()),
Arc::new(MockImportService::new()),
+ TableExtensionServiceImpl::new(),
config.config_obj(),
remote_fs.clone(),
rows_per_chunk,
@@ -1968,6 +1980,7 @@ mod tests {
TableValue::String("NULL".to_string()),
TableValue::String("NULL".to_string()),
TableValue::String("NULL".to_string()),
+ TableValue::String("NULL".to_string()),
]));
}
diff --git a/rust/cubestore/cubestore/src/sql/table_creator.rs b/rust/cubestore/cubestore/src/sql/table_creator.rs
index a39db27f3b9f9..a7cef4bc156ef 100644
--- a/rust/cubestore/cubestore/src/sql/table_creator.rs
+++ b/rust/cubestore/cubestore/src/sql/table_creator.rs
@@ -2,6 +2,7 @@ use std::sync::Arc;
use std::time::Duration;
use crate::cluster::{Cluster, JobEvent, JobResultListener};
+use crate::config::injection::DIService;
use crate::config::ConfigObj;
use crate::import::ImportService;
use crate::metastore::job::JobType;
@@ -14,11 +15,35 @@ use crate::sql::cache::SqlResultCache;
use crate::sql::parser::{CubeStoreParser, PartitionedIndexRef};
use crate::telemetry::incoming_traffic_agent_event;
use crate::CubeError;
+use async_trait::async_trait;
use chrono::{DateTime, Utc};
use futures::future::join_all;
use sqlparser::ast::*;
use std::mem::take;
+#[async_trait]
+pub trait TableExtensionService: DIService + Send + Sync {
+ async fn get_extension(&self) -> Option<serde_json::Value>;
+}
+
+pub struct TableExtensionServiceImpl;
+
+impl TableExtensionServiceImpl {
+ pub fn new() -> Arc<Self> {
+ Arc::new(Self {})
+ }
+}
+
+#[async_trait]
+impl TableExtensionService for TableExtensionServiceImpl {
+ async fn get_extension(&self) -> Option<serde_json::Value> {
+ None
+ }
+}
+
+crate::di_service!(TableExtensionServiceImpl, [TableExtensionService]);
+
enum FinalizeExternalTableResult {
Ok,
Orphaned,
@@ -27,6 +52,7 @@ pub struct TableCreator {
db: Arc<dyn MetaStore>,
cluster: Arc<dyn Cluster>,
import_service: Arc<dyn ImportService>,
+ table_extension_service: Arc<dyn TableExtensionService>,
config_obj: Arc<dyn ConfigObj>,
create_table_timeout: Duration,
cache: Arc<SqlResultCache>,
@@ -37,6 +63,7 @@ impl TableCreator {
db: Arc<dyn MetaStore>,
cluster: Arc<dyn Cluster>,
import_service: Arc<dyn ImportService>,
+ table_extension_service: Arc<dyn TableExtensionService>,
config_obj: Arc<dyn ConfigObj>,
create_table_timeout: Duration,
cache: Arc<SqlResultCache>,
@@ -45,6 +72,7 @@ impl TableCreator {
db,
cluster,
import_service,
+ table_extension_service,
config_obj,
create_table_timeout,
cache,
@@ -70,6 +98,8 @@ impl TableCreator {
partitioned_index: Option<PartitionedIndexRef>,
trace_obj: &Option<String>,
) -> Result<IdRow<Table>, CubeError> {
+ let extension: Option<serde_json::Value> =
+ self.table_extension_service.get_extension().await;
if !if_not_exists {
return self
.create_table_loop(
@@ -90,6 +120,7 @@ impl TableCreator {
aggregates,
partitioned_index,
&trace_obj,
+ &extension,
)
.await;
}
@@ -126,6 +157,7 @@ impl TableCreator {
aggregates,
partitioned_index,
&trace_obj,
+ &extension,
)
.await
})
@@ -151,6 +183,7 @@ impl TableCreator {
aggregates: Option>,
partitioned_index: Option<PartitionedIndexRef>,
trace_obj: &Option<String>,
+ extension: &Option<serde_json::Value>,
) -> Result<IdRow<Table>, CubeError> {
let mut retries = 0;
let max_retries = self.config_obj.create_table_max_retries();
@@ -179,6 +212,7 @@ impl TableCreator {
aggregates.clone(),
partitioned_index.clone(),
trace_obj,
+ extension,
)
.await?;
@@ -251,6 +285,7 @@ impl TableCreator {
aggregates: Option>,
partitioned_index: Option<PartitionedIndexRef>,
trace_obj: &Option<String>,
+ extension: &Option<serde_json::Value>,
) -> Result<IdRow<Table>, CubeError> {
let columns_to_set = convert_columns_type(columns)?;
let mut indexes_to_create = Vec::new();
@@ -369,6 +404,7 @@ impl TableCreator {
None,
None,
false,
+ extension.as_ref().map(|json_value| json_value.to_string()),
)
.await;
}
@@ -449,6 +485,7 @@ impl TableCreator {
partition_split_threshold,
trace_obj_to_save,
if_not_exists,
+ extension.as_ref().map(|json_value| json_value.to_string()),
)
.await?;
diff --git a/rust/cubestore/cubestore/src/store/compaction.rs b/rust/cubestore/cubestore/src/store/compaction.rs
index f451fd236c891..e533679f386a6 100644
--- a/rust/cubestore/cubestore/src/store/compaction.rs
+++ b/rust/cubestore/cubestore/src/store/compaction.rs
@@ -34,7 +34,7 @@ use datafusion::physical_plan::hash_aggregate::{
};
use datafusion::physical_plan::memory::MemoryExec;
use datafusion::physical_plan::merge_sort::{LastRowByUniqueKeyExec, MergeSortExec};
-use datafusion::physical_plan::parquet::ParquetExec;
+use datafusion::physical_plan::parquet::{MetadataCacheFactory, ParquetExec};
use datafusion::physical_plan::union::UnionExec;
use datafusion::physical_plan::{
AggregateExpr, ExecutionPlan, PhysicalExpr, SendableRecordBatchStream,
@@ -75,6 +75,7 @@ pub struct CompactionServiceImpl {
 chunk_store: Arc<dyn ChunkDataStore>,
 remote_fs: Arc<dyn RemoteFs>,
 config: Arc<dyn ConfigObj>,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
}
crate::di_service!(CompactionServiceImpl, [CompactionService]);
@@ -85,12 +86,14 @@ impl CompactionServiceImpl {
 chunk_store: Arc<dyn ChunkDataStore>,
 remote_fs: Arc<dyn RemoteFs>,
 config: Arc<dyn ConfigObj>,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
 ) -> Arc<CompactionServiceImpl> {
Arc::new(CompactionServiceImpl {
meta_store,
chunk_store,
remote_fs,
config,
+ metadata_cache_factory,
})
}
@@ -571,7 +574,11 @@ impl CompactionService for CompactionServiceImpl {
}
}
- let store = ParquetTableStore::new(index.get_row().clone(), ROW_GROUP_SIZE);
+ let store = ParquetTableStore::new(
+ index.get_row().clone(),
+ ROW_GROUP_SIZE,
+ self.metadata_cache_factory.clone(),
+ );
let old_partition_remote = match &new_chunk {
Some(_) => None,
None => partition.get_row().get_full_name(partition.get_id()),
@@ -644,13 +651,14 @@ impl CompactionService for CompactionServiceImpl {
let schema = Arc::new(arrow_schema(index.get_row()));
 let main_table: Arc<dyn ExecutionPlan> = match old_partition_local {
Some(file) => {
- let parquet_exec = Arc::new(ParquetExec::try_from_path(
+ let parquet_exec = Arc::new(ParquetExec::try_from_path_with_cache(
file.as_str(),
None,
None,
ROW_GROUP_SIZE,
1,
None,
+ self.metadata_cache_factory.make_noop_cache(),
)?);
Arc::new(TraceDataLoadedExec::new(
@@ -854,7 +862,7 @@ impl CompactionService for CompactionServiceImpl {
// TODO deactivate corrupt tables
let files = download_files(&partitions, self.remote_fs.clone()).await?;
let keys = find_partition_keys(
- keys_with_counts(&files, key_len).await?,
+ keys_with_counts(&files, self.metadata_cache_factory.as_ref(), key_len).await?,
key_len,
// TODO should it respect table partition_split_threshold?
self.config.partition_split_threshold() as usize,
@@ -897,6 +905,7 @@ impl CompactionService for CompactionServiceImpl {
let mut s = MultiSplit::new(
self.meta_store.clone(),
self.remote_fs.clone(),
+ self.metadata_cache_factory.clone(),
keys,
key_len,
multi_partition_id,
@@ -939,6 +948,7 @@ impl CompactionService for CompactionServiceImpl {
let mut s = MultiSplit::new(
self.meta_store.clone(),
self.remote_fs.clone(),
+ self.metadata_cache_factory.clone(),
keys,
key_len,
multi_partition_id,
@@ -983,19 +993,21 @@ async fn find_partition_keys(
async fn read_files(
files: &[String],
+ metadata_cache_factory: &dyn MetadataCacheFactory,
key_len: usize,
 projection: Option<Vec<usize>>,
 ) -> Result<Arc<dyn ExecutionPlan>, CubeError> {
 assert!(!files.is_empty());
 let mut inputs = Vec::<Arc<dyn ExecutionPlan>>::with_capacity(files.len());
for f in files {
- inputs.push(Arc::new(ParquetExec::try_from_files(
+ inputs.push(Arc::new(ParquetExec::try_from_files_with_cache(
&[f.as_str()],
projection.clone(),
None,
ROW_GROUP_SIZE,
1,
None,
+ metadata_cache_factory.make_noop_cache(),
)?));
}
let plan = Arc::new(UnionExec::new(inputs));
@@ -1012,10 +1024,17 @@ async fn read_files(
/// this key in the input files.
async fn keys_with_counts(
files: &[String],
+ metadata_cache_factory: &dyn MetadataCacheFactory,
key_len: usize,
 ) -> Result<HashAggregateExec, CubeError> {
let projection = (0..key_len).collect_vec();
- let plan = read_files(files, key_len, Some(projection.clone())).await?;
+ let plan = read_files(
+ files,
+ metadata_cache_factory,
+ key_len,
+ Some(projection.clone()),
+ )
+ .await?;
let fields = plan.schema();
let fields = fields.fields();
@@ -1404,6 +1423,8 @@ mod tests {
use datafusion::arrow::datatypes::Schema;
use datafusion::arrow::record_batch::RecordBatch;
use datafusion::physical_plan::collect;
+ use datafusion::physical_plan::parquet::BasicMetadataCacheFactory;
+ use datafusion::physical_plan::parquet::NoopParquetMetadataCache;
use std::fs;
use std::path::{Path, PathBuf};
@@ -1436,6 +1457,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -1517,6 +1539,7 @@ mod tests {
Arc::new(chunk_store),
remote_fs,
Arc::new(config),
+ Arc::new(BasicMetadataCacheFactory::new()),
);
compaction_service
.compact(1, DataLoadedSize::new())
@@ -1656,6 +1679,7 @@ mod tests {
remote_fs.clone(),
Arc::new(cluster),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
10,
);
metastore
@@ -1682,6 +1706,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -1742,6 +1767,7 @@ mod tests {
chunk_store.clone(),
remote_fs,
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
);
compaction_service
.compact_in_memory_chunks(partition.get_id())
@@ -1829,6 +1855,7 @@ mod tests {
remote_fs.clone(),
Arc::new(MockCluster::new()),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
50,
);
@@ -1867,6 +1894,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -1930,6 +1958,7 @@ mod tests {
chunk_store.clone(),
remote_fs.clone(),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
);
compaction_service
.compact(partition.get_id(), DataLoadedSize::new())
@@ -1953,8 +1982,16 @@ mod tests {
.await
.unwrap();
let reader = Arc::new(
- ParquetExec::try_from_path(local.as_str(), None, None, ROW_GROUP_SIZE, 1, None)
- .unwrap(),
+ ParquetExec::try_from_path_with_cache(
+ local.as_str(),
+ None,
+ None,
+ ROW_GROUP_SIZE,
+ 1,
+ None,
+ NoopParquetMetadataCache::new(),
+ )
+ .unwrap(),
);
let res_data = &collect(reader).await.unwrap()[0];
@@ -2152,6 +2189,7 @@ mod tests {
struct MultiSplit {
 meta: Arc<dyn MetaStore>,
 fs: Arc<dyn RemoteFs>,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
 keys: Vec<Row>,
key_len: usize,
multi_partition_id: u64,
@@ -2167,6 +2205,7 @@ impl MultiSplit {
fn new(
 meta: Arc<dyn MetaStore>,
 fs: Arc<dyn RemoteFs>,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
 keys: Vec<Row>,
key_len: usize,
multi_partition_id: u64,
@@ -2176,6 +2215,7 @@ impl MultiSplit {
MultiSplit {
meta,
fs,
+ metadata_cache_factory,
keys,
key_len,
multi_partition_id,
@@ -2229,10 +2269,15 @@ impl MultiSplit {
}
});
- let store = ParquetTableStore::new(p.index.get_row().clone(), ROW_GROUP_SIZE);
+ let store = ParquetTableStore::new(
+ p.index.get_row().clone(),
+ ROW_GROUP_SIZE,
+ self.metadata_cache_factory.clone(),
+ );
let records = if !in_files.is_empty() {
read_files(
 &in_files.into_iter().map(|(f, _)| f).collect::<Vec<_>>(),
+ self.metadata_cache_factory.as_ref(),
self.key_len,
None,
)
diff --git a/rust/cubestore/cubestore/src/store/mod.rs b/rust/cubestore/cubestore/src/store/mod.rs
index 559daa784cbe5..d5393c37a23a7 100644
--- a/rust/cubestore/cubestore/src/store/mod.rs
+++ b/rust/cubestore/cubestore/src/store/mod.rs
@@ -10,6 +10,7 @@ use datafusion::physical_plan::hash_aggregate::{
AggregateMode, AggregateStrategy, HashAggregateExec,
};
use datafusion::physical_plan::memory::MemoryExec;
+use datafusion::physical_plan::parquet::MetadataCacheFactory;
use datafusion::physical_plan::{ExecutionPlan, PhysicalExpr};
use serde::{de, Deserialize, Serialize};
extern crate bincode;
@@ -182,6 +183,7 @@ pub struct ChunkStore {
 remote_fs: Arc<dyn RemoteFs>,
 cluster: Arc<dyn Cluster>,
 config: Arc<dyn ConfigObj>,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
 memory_chunks: RwLock<HashMap<u64, Vec<RecordBatch>>>,
chunk_size: usize,
}
@@ -342,6 +344,7 @@ impl ChunkStore {
 remote_fs: Arc<dyn RemoteFs>,
 cluster: Arc<dyn Cluster>,
 config: Arc<dyn ConfigObj>,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
 chunk_size: usize,
 ) -> Arc<ChunkStore> {
let store = ChunkStore {
@@ -349,6 +352,7 @@ impl ChunkStore {
remote_fs,
cluster,
config,
+ metadata_cache_factory,
memory_chunks: RwLock::new(HashMap::new()),
chunk_size,
};
@@ -588,8 +592,10 @@ impl ChunkDataStore for ChunkStore {
)))])
} else {
let (local_file, index) = self.download_chunk(chunk, partition, index).await?;
+ let metadata_cache_factory: Arc<dyn MetadataCacheFactory> =
+ self.metadata_cache_factory.clone();
Ok(cube_ext::spawn_blocking(move || -> Result<_, CubeError> {
- let parquet = ParquetTableStore::new(index, ROW_GROUP_SIZE);
+ let parquet = ParquetTableStore::new(index, ROW_GROUP_SIZE, metadata_cache_factory);
Ok(parquet.read_columns(&local_file)?)
})
.await??)
@@ -804,6 +810,7 @@ mod tests {
use crate::{metastore::ColumnType, table::TableValue};
use cuberockstore::rocksdb::{Options, DB};
use datafusion::arrow::array::{Int64Array, StringArray};
+ use datafusion::physical_plan::parquet::BasicMetadataCacheFactory;
use std::fs;
use std::path::{Path, PathBuf};
@@ -888,6 +895,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -942,6 +950,7 @@ mod tests {
remote_fs.clone(),
Arc::new(MockCluster::new()),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
10,
);
@@ -984,6 +993,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -1044,6 +1054,7 @@ mod tests {
remote_fs.clone(),
Arc::new(MockCluster::new()),
config.config_obj(),
+ Arc::new(BasicMetadataCacheFactory::new()),
10,
);
@@ -1094,6 +1105,7 @@ mod tests {
None,
None,
false,
+ None,
)
.await
.unwrap();
@@ -1372,8 +1384,14 @@ impl ChunkStore {
let local_file = self.remote_fs.temp_upload_path(remote_path.clone()).await?;
let local_file = scopeguard::guard(local_file, ensure_temp_file_is_dropped);
let local_file_copy = local_file.clone();
+ let metadata_cache_factory: Arc<dyn MetadataCacheFactory> =
+ self.metadata_cache_factory.clone();
cube_ext::spawn_blocking(move || -> Result<(), CubeError> {
- let parquet = ParquetTableStore::new(index.get_row().clone(), ROW_GROUP_SIZE);
+ let parquet = ParquetTableStore::new(
+ index.get_row().clone(),
+ ROW_GROUP_SIZE,
+ metadata_cache_factory,
+ );
parquet.write_data(&local_file_copy, data)?;
Ok(())
})
diff --git a/rust/cubestore/cubestore/src/streaming/kafka.rs b/rust/cubestore/cubestore/src/streaming/kafka.rs
index a6c12a5dfe7e2..f3e9b57d39411 100644
--- a/rust/cubestore/cubestore/src/streaming/kafka.rs
+++ b/rust/cubestore/cubestore/src/streaming/kafka.rs
@@ -11,6 +11,7 @@ use async_std::stream;
use async_trait::async_trait;
use datafusion::arrow::array::ArrayRef;
use datafusion::cube_ext;
+use datafusion::physical_plan::parquet::MetadataCacheFactory;
use futures::Stream;
use json::object::Object;
use json::JsonValue;
@@ -59,6 +60,7 @@ impl KafkaStreamingSource {
 kafka_client: Arc<dyn KafkaClientService>,
 use_ssl: bool,
 trace_obj: Option<String>,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
 ) -> Result<Self, CubeError> {
let (post_processing_plan, columns, unique_key_columns, seq_column_index) =
if let Some(select_statement) = select_statement {
@@ -69,7 +71,7 @@ impl KafkaStreamingSource {
columns.clone(),
source_columns,
);
- let plan = planner.build(select_statement.clone())?;
+ let plan = planner.build(select_statement.clone(), metadata_cache_factory)?;
let columns = plan.source_columns().clone();
let seq_column_index = plan.source_seq_column_index();
let unique_columns = plan.source_unique_columns().clone();
diff --git a/rust/cubestore/cubestore/src/streaming/kafka_post_processing.rs b/rust/cubestore/cubestore/src/streaming/kafka_post_processing.rs
index ab5034c06287e..4f47517e62e9e 100644
--- a/rust/cubestore/cubestore/src/streaming/kafka_post_processing.rs
+++ b/rust/cubestore/cubestore/src/streaming/kafka_post_processing.rs
@@ -10,8 +10,9 @@ use datafusion::logical_plan::{
};
use datafusion::physical_plan::empty::EmptyExec;
use datafusion::physical_plan::memory::MemoryExec;
+use datafusion::physical_plan::parquet::MetadataCacheFactory;
use datafusion::physical_plan::{collect, ExecutionPlan};
-use datafusion::prelude::ExecutionContext;
+use datafusion::prelude::{ExecutionConfig, ExecutionContext};
use datafusion::sql::parser::Statement as DFStatement;
use datafusion::sql::planner::SqlToRel;
use sqlparser::ast::Expr as SQExpr;
@@ -126,7 +127,11 @@ impl KafkaPostProcessPlanner {
}
}
- pub fn build(&self, select_statement: String) -> Result<KafkaPostProcessPlan, CubeError> {
+ pub fn build(
+ &self,
+ select_statement: String,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
+ ) -> Result<KafkaPostProcessPlan, CubeError> {
let target_schema = Arc::new(Schema::new(
self.columns
.iter()
@@ -137,7 +142,7 @@ impl KafkaPostProcessPlanner {
let source_unique_columns = self.extract_source_unique_columns(&logical_plan)?;
let (projection_plan, filter_plan) =
- self.make_projection_and_filter_physical_plans(&logical_plan)?;
+ self.make_projection_and_filter_physical_plans(&logical_plan, metadata_cache_factory)?;
if target_schema != projection_plan.schema() {
return Err(CubeError::user(format!(
"Table schema: {:?} don't match select_statement result schema: {:?}",
@@ -352,6 +357,7 @@ impl KafkaPostProcessPlanner {
fn make_projection_and_filter_physical_plans(
&self,
plan: &LogicalPlan,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
 ) -> Result<(Arc<dyn ExecutionPlan>, Option<Arc<dyn ExecutionPlan>>), CubeError> {
let source_schema = Arc::new(Schema::new(
self.source_columns
@@ -373,7 +379,10 @@ impl KafkaPostProcessPlanner {
schema.clone(),
projection_input.clone(),
)?;
- let plan_ctx = Arc::new(ExecutionContext::new());
+ let plan_ctx = Arc::new(ExecutionContext::with_config(
+ ExecutionConfig::new()
+ .with_metadata_cache_factory(metadata_cache_factory),
+ ));
let projection_phys_plan = plan_ctx
.create_physical_plan(&projection_plan)?
@@ -393,7 +402,9 @@ impl KafkaPostProcessPlanner {
LogicalPlan::TableScan { .. } => {
let projection_plan =
self.make_projection_plan(expr, schema.clone(), projection_input.clone())?;
- let plan_ctx = Arc::new(ExecutionContext::new());
+ let plan_ctx = Arc::new(ExecutionContext::with_config(
+ ExecutionConfig::new().with_metadata_cache_factory(metadata_cache_factory),
+ ));
let projection_phys_plan = plan_ctx
.create_physical_plan(&projection_plan)?
.with_new_children(vec![empty_exec.clone()])?;
diff --git a/rust/cubestore/cubestore/src/streaming/mod.rs b/rust/cubestore/cubestore/src/streaming/mod.rs
index 46f6db8827fab..f73426df12b33 100644
--- a/rust/cubestore/cubestore/src/streaming/mod.rs
+++ b/rust/cubestore/cubestore/src/streaming/mod.rs
@@ -23,6 +23,7 @@ use chrono::Utc;
use datafusion::arrow::array::ArrayBuilder;
use datafusion::arrow::array::ArrayRef;
use datafusion::cube_ext::ordfloat::OrdF64;
+use datafusion::physical_plan::parquet::MetadataCacheFactory;
use futures::future::join_all;
use futures::stream::StreamExt;
use futures::Stream;
@@ -57,6 +58,7 @@ pub struct StreamingServiceImpl {
 chunk_store: Arc<dyn ChunkDataStore>,
 ksql_client: Arc<dyn KsqlClient>,
 kafka_client: Arc<dyn KafkaClientService>,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
}
crate::di_service!(StreamingServiceImpl, [StreamingService]);
@@ -68,6 +70,7 @@ impl StreamingServiceImpl {
 chunk_store: Arc<dyn ChunkDataStore>,
 ksql_client: Arc<dyn KsqlClient>,
 kafka_client: Arc<dyn KafkaClientService>,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
 ) -> Arc<Self> {
Arc::new(Self {
config_obj,
@@ -75,6 +78,7 @@ impl StreamingServiceImpl {
chunk_store,
ksql_client,
kafka_client,
+ metadata_cache_factory,
})
}
@@ -165,6 +169,7 @@ impl StreamingServiceImpl {
self.kafka_client.clone(),
*use_ssl,
trace_obj,
+ self.metadata_cache_factory.clone(),
)?)),
}
}
diff --git a/rust/cubestore/cubestore/src/table/parquet.rs b/rust/cubestore/cubestore/src/table/parquet.rs
index 62bb1a5d8f2e0..56cd5f8c939bd 100644
--- a/rust/cubestore/cubestore/src/table/parquet.rs
+++ b/rust/cubestore/cubestore/src/table/parquet.rs
@@ -6,7 +6,7 @@ use datafusion::arrow::datatypes::Schema;
use datafusion::arrow::record_batch::RecordBatch;
use datafusion::parquet::arrow::{ArrowReader, ArrowWriter, ParquetFileArrowReader};
use datafusion::parquet::file::properties::{WriterProperties, WriterVersion};
-use datafusion::physical_plan::parquet::{NoopParquetMetadataCache, ParquetMetadataCache};
+use datafusion::physical_plan::parquet::{MetadataCacheFactory, ParquetMetadataCache};
use std::fs::File;
use std::sync::Arc;
@@ -36,15 +36,47 @@ impl CubestoreParquetMetadataCache for CubestoreParquetMetadataCacheImpl {
}
}
+pub trait CubestoreMetadataCacheFactory: DIService + Send + Sync {
+ fn cache_factory(&self) -> Arc<dyn MetadataCacheFactory>;
+}
+
+pub struct CubestoreMetadataCacheFactoryImpl {
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
+}
+
+crate::di_service!(
+ CubestoreMetadataCacheFactoryImpl,
+ [CubestoreMetadataCacheFactory]
+);
+
+impl CubestoreMetadataCacheFactoryImpl {
+ pub fn new(
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
+ ) -> Arc<CubestoreMetadataCacheFactoryImpl> {
+ Arc::new(CubestoreMetadataCacheFactoryImpl {
+ metadata_cache_factory,
+ })
+ }
+}
+
+impl CubestoreMetadataCacheFactory for CubestoreMetadataCacheFactoryImpl {
+ fn cache_factory(&self) -> Arc<dyn MetadataCacheFactory> {
+ self.metadata_cache_factory.clone()
+ }
+}
+
pub struct ParquetTableStore {
table: Index,
row_group_size: usize,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
}
impl ParquetTableStore {
 pub fn read_columns(&self, path: &str) -> Result<Vec<RecordBatch>, CubeError> {
let mut r = ParquetFileArrowReader::new(Arc::new(
- NoopParquetMetadataCache::new().file_reader(path)?,
+ self.metadata_cache_factory
+ .make_noop_cache()
+ .file_reader(path)?,
));
let mut batches = Vec::new();
for b in r.get_record_reader(self.row_group_size)? {
@@ -55,10 +87,15 @@ impl ParquetTableStore {
}
impl ParquetTableStore {
- pub fn new(table: Index, row_group_size: usize) -> ParquetTableStore {
+ pub fn new(
+ table: Index,
+ row_group_size: usize,
+ metadata_cache_factory: Arc<dyn MetadataCacheFactory>,
+ ) -> ParquetTableStore {
ParquetTableStore {
table,
row_group_size,
+ metadata_cache_factory,
}
}
@@ -77,16 +114,18 @@ impl ParquetTableStore {
}
pub fn writer_props(&self) -> WriterProperties {
- WriterProperties::builder()
- .set_max_row_group_size(self.row_group_size)
- .set_writer_version(WriterVersion::PARQUET_2_0)
- .build()
+ self.metadata_cache_factory.build_writer_props(
+ WriterProperties::builder()
+ .set_max_row_group_size(self.row_group_size)
+ .set_writer_version(WriterVersion::PARQUET_2_0),
+ )
}
 pub fn write_data(&self, dest_file: &str, columns: Vec<ArrayRef>) -> Result<(), CubeError> {
let schema = Arc::new(arrow_schema(&self.table));
let batch = RecordBatch::try_new(schema.clone(), columns.to_vec())?;
+ // TODO: Just look for every place SerializedFileWriter is constructed and see if we missed one.
let mut w =
ArrowWriter::try_new(File::create(dest_file)?, schema, Some(self.writer_props()))?;
w.write(&batch)?;
@@ -120,6 +159,7 @@ mod tests {
use datafusion::parquet::file::reader::FileReader;
use datafusion::parquet::file::reader::SerializedFileReader;
use datafusion::parquet::file::statistics::{Statistics, TypedStatistics};
+ use datafusion::physical_plan::parquet::BasicMetadataCacheFactory;
use itertools::Itertools;
use pretty_assertions::assert_eq;
use std::sync::Arc;
@@ -153,7 +193,11 @@ mod tests {
.unwrap();
let dest_file = NamedTempFile::new().unwrap();
- let store = ParquetTableStore::new(index, ROW_GROUP_SIZE);
+ let store = ParquetTableStore::new(
+ index,
+ ROW_GROUP_SIZE,
+ Arc::new(BasicMetadataCacheFactory::new()),
+ );
 let data: Vec<ArrayRef> = vec![
Arc::new(StringArray::from(vec![
@@ -243,6 +287,7 @@ mod tests {
)
.unwrap(),
row_group_size: 10,
+ metadata_cache_factory: Arc::new(BasicMetadataCacheFactory::new()),
};
let file = NamedTempFile::new().unwrap();
let file_name = file.path().to_str().unwrap();
@@ -302,7 +347,11 @@ mod tests {
let count_min = compaction::write_to_files(
to_stream(to_split_batch).await,
to_split.len(),
- ParquetTableStore::new(store.table.clone(), store.row_group_size),
+ ParquetTableStore::new(
+ store.table.clone(),
+ store.row_group_size,
+ Arc::new(BasicMetadataCacheFactory::new()),
+ ),
vec![split_1.to_string(), split_2.to_string()],
)
.await
@@ -364,7 +413,11 @@ mod tests {
)
.unwrap();
let tmp_file = NamedTempFile::new().unwrap();
- let store = ParquetTableStore::new(index.clone(), NUM_ROWS);
+ let store = ParquetTableStore::new(
+ index.clone(),
+ NUM_ROWS,
+ Arc::new(BasicMetadataCacheFactory::new()),
+ );
store
.write_data(
tmp_file.path().to_str().unwrap(),
@@ -421,7 +474,11 @@ mod tests {
let data = rows_to_columns(&index.columns(), &rows);
- let w = ParquetTableStore::new(index.clone(), NUM_ROWS);
+ let w = ParquetTableStore::new(
+ index.clone(),
+ NUM_ROWS,
+ Arc::new(BasicMetadataCacheFactory::new()),
+ );
w.write_data(file, data.clone()).unwrap();
let r = concat_record_batches(&w.read_columns(file).unwrap());
assert_eq_columns!(r.columns(), &data);
From 70ff901b9045c04e93042cdf9c854306efd3e28b Mon Sep 17 00:00:00 2001
From: Mike Nitsenko
Date: Tue, 10 Sep 2024 22:42:25 +0500
Subject: [PATCH 015/415] docs: describe additional meta params for MDX API
(#8690)
---
.../product/apis-integrations/mdx-api.mdx | 65 ++++++++++++++++++-
1 file changed, 64 insertions(+), 1 deletion(-)
diff --git a/docs/pages/product/apis-integrations/mdx-api.mdx b/docs/pages/product/apis-integrations/mdx-api.mdx
index e54dd77ecb301..700348fca11b6 100644
--- a/docs/pages/product/apis-integrations/mdx-api.mdx
+++ b/docs/pages/product/apis-integrations/mdx-api.mdx
@@ -50,9 +50,72 @@ views:
- city
```
+### Dimension keys
+
+You can define a member that will be used as the key for a dimension by setting the `key_member` parameter in the `meta` block of the cube's model file.
+
+```yaml
+cubes:
+ - name: users
+ sql_table: USERS
+ public: false
+
+ dimensions:
+ - name: id
+ sql: "{CUBE}.ID"
+ type: number
+ primary_key: true
+
+ - name: first_name
+ sql: FIRST_NAME
+ type: string
+ meta:
+ key_member: users_id
+```
+
+### Dimension labels
+
+You can define a member that will be used as the label for a dimension by setting the `label_member` parameter in the `meta` block of the cube's model file.
+
+```yaml
+cubes:
+ - name: users
+ sql_table: USERS
+ public: false
+
+ dimensions:
+ - name: id
+ sql: "{CUBE}.ID"
+ type: number
+ meta:
+ label_member: users_first_name
+```
+
+### Custom properties
+
+You can define custom properties for dimensions under `meta.properties` in the cube's model file.
+
+```yaml
+cubes:
+ - name: users
+ sql_table: USERS
+ public: false
+
+ dimensions:
+ - name: id
+ sql: "{CUBE}.ID"
+ type: number
+ meta:
+ properties:
+ - name: "Property A"
+ column: users_first_name
+ - name: "Property B"
+ value: users_city
+```
+
### Measure groups
-MDX API supports organizing measures into groups (folders). You can define measure groups in the view's schema file.
+The MDX API supports organizing measures into groups (folders). You can define measure groups in the view's model file.
```yaml
views:
From cdfbd1e21ffcf111e40c525f8a391cc0dcee3c11 Mon Sep 17 00:00:00 2001
From: Dmitriy Rusov
Date: Wed, 11 Sep 2024 11:53:09 +0200
Subject: [PATCH 016/415] feat: ksql and rollup pre-aggregations (#8619)
Added support for pre-aggregations for ksqlDB, using either a select statement or direct load from Kafka.
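For reviewers, a minimal sketch of how the new model options fit together, mirroring the `RequestsStream.stream` fixture added in this patch. The cube name, stream, and type strings here are illustrative only: `uniqueKeyColumns` supplies the deduplication key for rows loaded from the stream, and `outputColumnTypes` pins the Cube Store column types in select-list order (snake_case spellings are accepted too, per the transpiler patterns below).

```js
cube("EventsStream", {
  dataSource: "ksql",
  sql: `SELECT * FROM EVENTS`,

  measures: {
    count: { type: "count" },
  },

  dimensions: {
    id: { sql: `ID`, type: "string", primaryKey: true },
    timestamp: { sql: `TIMESTAMP`, type: "time" },
  },

  preAggregations: {
    stream: {
      type: `rollup`,
      external: true,
      readOnly: true,
      // Consume the Kafka topic from the beginning.
      streamOffset: "earliest",
      measures: [count],
      dimensions: [id, timestamp],
      timeDimension: EventsStream.timestamp,
      granularity: "day",
      partitionGranularity: "day",
      // New: deduplication key columns for the streamed rollup.
      uniqueKeyColumns: ["id"],
      // New: explicit output column types, in select-list order.
      outputColumnTypes: [
        { member: id, type: "text" },
        { member: timestamp, type: "timestamp" },
        { member: count, type: "int" },
      ],
    },
  },
});
```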
---
.../src/driver.interface.ts | 2 +
.../src/CubeStoreDriver.ts | 5 +
packages/cubejs-ksql-driver/src/KsqlDriver.ts | 41 +++--
packages/cubejs-ksql-driver/src/KsqlQuery.ts | 6 +-
.../src/orchestrator/PreAggregations.ts | 25 ++-
.../src/adapter/BaseQuery.js | 63 ++++++-
.../src/adapter/PreAggregations.js | 21 ++-
.../src/compiler/CubeEvaluator.ts | 6 +
.../src/compiler/CubeValidator.ts | 13 +-
.../transpilers/CubePropContextTranspiler.ts | 1 +
.../postgres/pre-aggregations.test.ts | 2 +
packages/cubejs-testing-shared/package.json | 3 +-
.../db-runner.abstract.ts | 3 +
.../src/db-container-runners/index.ts | 2 +
.../src/db-container-runners/kafka.ts | 31 ++++
.../src/db-container-runners/ksql.ts | 82 +++++++++
.../birdbox-fixtures/lambda/cube.js | 21 +++
.../lambda/schema/Requests.js | 96 +++++++++++
packages/cubejs-testing/package.json | 3 +-
.../cubejs-testing/test/smoke-lambda.test.ts | 48 +++++-
yarn.lock | 156 +++++++++++++++++-
21 files changed, 594 insertions(+), 36 deletions(-)
create mode 100644 packages/cubejs-testing-shared/src/db-container-runners/kafka.ts
create mode 100644 packages/cubejs-testing-shared/src/db-container-runners/ksql.ts
create mode 100644 packages/cubejs-testing/birdbox-fixtures/lambda/schema/Requests.js
diff --git a/packages/cubejs-base-driver/src/driver.interface.ts b/packages/cubejs-base-driver/src/driver.interface.ts
index 0122014c72c87..15aecd3518bc5 100644
--- a/packages/cubejs-base-driver/src/driver.interface.ts
+++ b/packages/cubejs-base-driver/src/driver.interface.ts
@@ -90,6 +90,7 @@ export interface StreamTableData extends DownloadTableBase {
export interface StreamingSourceTableData extends DownloadTableBase {
streamingTable: string;
selectStatement?: string;
+ sourceTable?: any;
partitions?: number;
streamOffset?: string;
streamingSource: {
@@ -130,6 +131,7 @@ export type StreamOptions = {
export type StreamingSourceOptions = {
streamOffset?: boolean;
+ outputColumnTypes?: TableColumn[];
};
export interface DownloadQueryResultsBase {
diff --git a/packages/cubejs-cubestore-driver/src/CubeStoreDriver.ts b/packages/cubejs-cubestore-driver/src/CubeStoreDriver.ts
index 8685735979bb3..a38e4e3cfa36e 100644
--- a/packages/cubejs-cubestore-driver/src/CubeStoreDriver.ts
+++ b/packages/cubejs-cubestore-driver/src/CubeStoreDriver.ts
@@ -46,6 +46,7 @@ type CreateTableOptions = {
files?: string[]
aggregations?: string
selectStatement?: string
+ sourceTable?: any
sealAt?: string
delimiter?: string
};
@@ -118,6 +119,9 @@ export class CubeStoreDriver extends BaseDriver implements DriverInterface {
if (options.selectStatement) {
withEntries.push(`select_statement = ${escape(options.selectStatement)}`);
}
+ if (options.sourceTable) {
+ withEntries.push(`source_table = ${escape(`CREATE TABLE ${options.sourceTable.tableName} (${options.sourceTable.types.map(t => `${t.name} ${this.fromGenericType(t.type)}`).join(', ')})`)}`);
+ }
if (options.streamOffset) {
withEntries.push(`stream_offset = '${options.streamOffset}'`);
}
@@ -431,6 +435,7 @@ export class CubeStoreDriver extends BaseDriver implements DriverInterface {
indexes,
files: locations,
selectStatement: tableData.selectStatement,
+ sourceTable: tableData.sourceTable,
streamOffset: tableData.streamOffset,
sealAt
};
diff --git a/packages/cubejs-ksql-driver/src/KsqlDriver.ts b/packages/cubejs-ksql-driver/src/KsqlDriver.ts
index 4bce1e812475d..651d29c883ec3 100644
--- a/packages/cubejs-ksql-driver/src/KsqlDriver.ts
+++ b/packages/cubejs-ksql-driver/src/KsqlDriver.ts
@@ -10,7 +10,7 @@ import {
} from '@cubejs-backend/shared';
import {
BaseDriver, DriverCapabilities,
- DriverInterface, QueryOptions,
+ DriverInterface, TableColumn,
} from '@cubejs-backend/base-driver';
import { Kafka } from 'kafkajs';
import sqlstring, { format as formatSql } from 'sqlstring';
@@ -64,6 +64,12 @@ type KsqlDescribeResponse = {
}
};
+type KsqlQueryOptions = {
+ outputColumnTypes?: TableColumn[],
+ streamOffset?: string,
+ selectStatement?: string,
+};
+
/**
* KSQL driver class.
*/
@@ -161,7 +167,7 @@ export class KsqlDriver extends BaseDriver implements DriverInterface {
}
}
- public async query(query: string, values?: unknown[], options: { streamOffset?: string } = {}): Promise {
+ public async query(query: string, values?: unknown[], options: KsqlQueryOptions = {}): Promise {
if (query.toLowerCase().startsWith('select')) {
throw new Error('Select queries for ksql allowed only from Cube Store. In order to query ksql create pre-aggregation first.');
}
@@ -261,13 +267,15 @@ export class KsqlDriver extends BaseDriver implements DriverInterface {
}
// eslint-disable-next-line @typescript-eslint/no-unused-vars
- public loadPreAggregationIntoTable(preAggregationTableName: string, loadSql: string, params: any[], options: any): Promise {
- return this.query(loadSql.replace(preAggregationTableName, this.tableDashName(preAggregationTableName)), params, { streamOffset: options?.streamOffset });
+ public loadPreAggregationIntoTable(preAggregationTableName: string, loadSql: string, params: any[], options: KsqlQueryOptions): Promise {
+ const { streamOffset } = options || {};
+ return this.query(loadSql.replace(preAggregationTableName, this.tableDashName(preAggregationTableName)), params, { streamOffset });
}
// eslint-disable-next-line @typescript-eslint/no-unused-vars
public async downloadTable(table: string, options: any): Promise {
- return this.getStreamingTableData(this.tableDashName(table), { streamOffset: options?.streamOffset });
+ const { streamOffset } = options || {};
+ return this.getStreamingTableData(this.tableDashName(table), { streamOffset });
}
// eslint-disable-next-line @typescript-eslint/no-unused-vars
@@ -278,11 +286,12 @@ export class KsqlDriver extends BaseDriver implements DriverInterface {
}
const selectStatement = sqlstring.format(query, params);
- return this.getStreamingTableData(table, { selectStatement, streamOffset: options?.streamOffset });
+ const { streamOffset, outputColumnTypes } = options || {};
+ return this.getStreamingTableData(table, { selectStatement, streamOffset, outputColumnTypes });
}
- private async getStreamingTableData(streamingTable: string, options: { selectStatement?: string, streamOffset?: string } = {}) {
- const { selectStatement, streamOffset } = options;
+ private async getStreamingTableData(streamingTable: string, options: KsqlQueryOptions = {}) {
+ const { selectStatement, streamOffset, outputColumnTypes } = options;
const describe = await this.describeTable(streamingTable);
const name = this.config.streamingSourceName || 'default';
const kafkaDirectDownload = !!this.config.kafkaHost;
@@ -304,13 +313,20 @@ export class KsqlDriver extends BaseDriver implements DriverInterface {
url: this.config.url
}
};
+ const sourceTableTypes = await this.tableColumnTypes(streamingTable, describe);
+ streamingTable = kafkaDirectDownload ? describe.sourceDescription?.topic : streamingTable;
+
return {
- types: await this.tableColumnTypes(streamingTable, describe),
+ types: outputColumnTypes || sourceTableTypes,
partitions: describe.sourceDescription?.partitions,
- streamingTable: kafkaDirectDownload ? describe.sourceDescription?.topic : streamingTable,
+ streamingTable,
streamOffset,
selectStatement,
- streamingSource
+ streamingSource,
+ sourceTable: outputColumnTypes ? {
+ types: sourceTableTypes,
+ tableName: streamingTable
+ } : null
};
}
@@ -344,7 +360,8 @@ export class KsqlDriver extends BaseDriver implements DriverInterface {
public capabilities(): DriverCapabilities {
return {
- streamingSource: true
+ streamingSource: true,
+ unloadWithoutTempTable: true,
};
}
}
diff --git a/packages/cubejs-ksql-driver/src/KsqlQuery.ts b/packages/cubejs-ksql-driver/src/KsqlQuery.ts
index 082a249a78c40..3af3a71f4f16e 100644
--- a/packages/cubejs-ksql-driver/src/KsqlQuery.ts
+++ b/packages/cubejs-ksql-driver/src/KsqlQuery.ts
@@ -55,6 +55,10 @@ export class KsqlQuery extends BaseQuery {
return `\`${name}\``;
}
+ public castToString(sql: string) {
+ return `CAST(${sql} as varchar(255))`;
+ }
+
public concatStringsSql(strings: string[]) {
return `CONCAT(${strings.join(', ')})`;
}
@@ -111,7 +115,7 @@ export class KsqlQuery extends BaseQuery {
}
public static extractTableFromSimpleSelectAsteriskQuery(sql: string) {
- const match = sql.match(/^\s*select\s+\*\s+from\s+([a-zA-Z0-9_\-`".*]+)\s*/i);
+ const match = sql.replace(/\n/g, ' ').match(/^\s*select\s+.*\s+from\s+([a-zA-Z0-9_\-`".*]+)\s*/i);
return match && match[1];
}
}
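As a quick sanity check of the relaxed matcher above, a hedged example (the SQL string is hypothetical): the old regex only accepted `select * from ...`, while the new one also tolerates an explicit select list and embedded newlines.

```js
const sql = `SELECT ID, TIMESTAMP
FROM REQUESTS`;

// Same expression as KsqlQuery.extractTableFromSimpleSelectAsteriskQuery above.
const match = sql
  .replace(/\n/g, ' ')
  .match(/^\s*select\s+.*\s+from\s+([a-zA-Z0-9_\-`".*]+)\s*/i);

console.log(match && match[1]); // -> "REQUESTS"
```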
diff --git a/packages/cubejs-query-orchestrator/src/orchestrator/PreAggregations.ts b/packages/cubejs-query-orchestrator/src/orchestrator/PreAggregations.ts
index d660be85bc42b..82935e33a94a9 100644
--- a/packages/cubejs-query-orchestrator/src/orchestrator/PreAggregations.ts
+++ b/packages/cubejs-query-orchestrator/src/orchestrator/PreAggregations.ts
@@ -104,6 +104,9 @@ function getStructureVersion(preAggregation) {
if (preAggregation.streamOffset) {
versionArray.push(preAggregation.streamOffset);
}
+ if (preAggregation.outputColumnTypes) {
+ versionArray.push(preAggregation.outputColumnTypes);
+ }
return version(versionArray.length === 1 ? versionArray[0] : versionArray);
}
@@ -815,6 +818,9 @@ export class PreAggregationLoader {
if (this.preAggregation.streamOffset) {
versionArray.push(this.preAggregation.streamOffset);
}
+ if (this.preAggregation.outputColumnTypes) {
+ versionArray.push(this.preAggregation.outputColumnTypes);
+ }
versionArray.push(invalidationKeys);
return version(versionArray);
}
@@ -964,7 +970,11 @@ export class PreAggregationLoader {
targetTableName,
query,
params,
- { streamOffset: this.preAggregation.streamOffset, ...queryOptions }
+ {
+ streamOffset: this.preAggregation.streamOffset,
+ outputColumnTypes: this.preAggregation.outputColumnTypes,
+ ...queryOptions
+ }
));
await this.createIndexes(client, newVersionEntry, saveCancelFn, queryOptions);
@@ -1107,7 +1117,11 @@ export class PreAggregationLoader {
targetTableName,
query,
params,
- { streamOffset: this.preAggregation.streamOffset, ...queryOptions }
+ {
+ streamOffset: this.preAggregation.streamOffset,
+ outputColumnTypes: this.preAggregation.outputColumnTypes,
+ ...queryOptions
+ }
));
return queryOptions;
@@ -1156,6 +1170,7 @@ export class PreAggregationLoader {
sql,
params, {
streamOffset: this.preAggregation.streamOffset,
+ outputColumnTypes: this.preAggregation.outputColumnTypes,
...queryOptions,
...capabilities,
...this.getStreamingOptions(),
@@ -1261,7 +1276,11 @@ export class PreAggregationLoader {
tableData.rowStream = stream;
}
} else {
- tableData = await saveCancelFn(client.downloadTable(table, { streamOffset: this.preAggregation.streamOffset, ...externalDriverCapabilities }));
+ tableData = await saveCancelFn(client.downloadTable(table, {
+ streamOffset: this.preAggregation.streamOffset,
+ outputColumnTypes: this.preAggregation.outputColumnTypes,
+ ...externalDriverCapabilities
+ }));
}
if (!tableData.types) {
diff --git a/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js b/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js
index 9a0de0cd60cf3..9a7ca0fff16f0 100644
--- a/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js
+++ b/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js
@@ -2746,9 +2746,12 @@ export class BaseQuery {
}
newSubQueryForCube(cube, options) {
- return this.options.queryFactory
- ? this.options.queryFactory.createQuery(cube, this.compilers, this.subQueryOptions(options))
- : this.newSubQuery(options);
+ if (this.options.queryFactory) {
+ options.paramAllocator = null;
+ return this.options.queryFactory.createQuery(cube, this.compilers, this.subQueryOptions(options));
+ }
+
+ return this.newSubQuery(options);
}
subQueryOptions(options) {
@@ -2942,6 +2945,60 @@ export class BaseQuery {
);
}
+ preAggregationOutputColumnTypes(cube, preAggregation) {
+ return this.cacheValue(
+ ['preAggregationOutputColumnTypes', cube, JSON.stringify(preAggregation)],
+ () => {
+ if (!preAggregation.outputColumnTypes) {
+ return null;
+ }
+
+ if (preAggregation.type === 'rollup') {
+ const query = this.preAggregations.rollupPreAggregationQuery(cube, preAggregation);
+
+ const evaluatedMapOutputColumnTypes = preAggregation.outputColumnTypes.reduce((acc, outputColumnType) => {
+ acc.set(outputColumnType.name, outputColumnType);
+ return acc;
+ }, new Map());
+
+ const findSchemaType = member => {
+ const outputSchemaType = evaluatedMapOutputColumnTypes.get(member);
+ if (!outputSchemaType) {
+ throw new UserError(`Output schema type for ${member} not found in pre-aggregation ${preAggregation}`);
+ }
+
+ return {
+ name: this.aliasName(member),
+ type: outputSchemaType.type,
+ };
+ };
+
+ // The order of the output columns is important, it should match the order in the select statement
+ const outputColumnTypes = [
+ ...(query.dimensions || []).map(d => findSchemaType(d.dimension)),
+ ...(query.timeDimensions || []).map(t => ({
+ name: `${this.aliasName(t.dimension)}_${t.granularity}`,
+ type: 'TIMESTAMP'
+ })),
+ ...(query.measures || []).map(m => findSchemaType(m.measure)),
+ ];
+
+ return outputColumnTypes;
+ }
+ throw new UserError('Output schema is only supported for rollup pre-aggregations');
+ },
+ { inputProps: { }, cache: this.queryCache }
+ );
+ }
+
+ preAggregationUniqueKeyColumns(cube, preAggregation) {
+ if (preAggregation.uniqueKeyColumns) {
+ return preAggregation.uniqueKeyColumns.map(key => this.aliasName(`${cube}.${key}`));
+ }
+
+ return this.dimensionColumns();
+ }
+
preAggregationReadOnly(_cube, _preAggregation) {
return false;
}
diff --git a/packages/cubejs-schema-compiler/src/adapter/PreAggregations.js b/packages/cubejs-schema-compiler/src/adapter/PreAggregations.js
index 9edc16c70ca53..bb8905aab3203 100644
--- a/packages/cubejs-schema-compiler/src/adapter/PreAggregations.js
+++ b/packages/cubejs-schema-compiler/src/adapter/PreAggregations.js
@@ -1,4 +1,5 @@
import R from 'ramda';
+import { FROM_PARTITION_RANGE, TO_PARTITION_RANGE } from '@cubejs-backend/shared';
import { UserError } from '../compiler/UserError';
@@ -189,7 +190,7 @@ export class PreAggregations {
const uniqueKeyColumnsDefault = () => null;
const uniqueKeyColumns = ({
- rollup: () => queryForSqlEvaluation.dimensionColumns(),
+ rollup: () => queryForSqlEvaluation.preAggregationUniqueKeyColumns(cube, preAggregation),
originalSql: () => preAggregation.uniqueKeyColumns || null
}[preAggregation.type] || uniqueKeyColumnsDefault)();
@@ -209,6 +210,7 @@ export class PreAggregations {
preAggregationsSchema: queryForSqlEvaluation.preAggregationSchema(),
loadSql: queryForSqlEvaluation.preAggregationLoadSql(cube, preAggregation, tableName),
sql: queryForSqlEvaluation.preAggregationSql(cube, preAggregation),
+ outputColumnTypes: queryForSqlEvaluation.preAggregationOutputColumnTypes(cube, preAggregation),
uniqueKeyColumns,
aggregationsColumns,
dataSource: queryForSqlEvaluation.dataSource,
@@ -219,7 +221,7 @@ export class PreAggregations {
queryForSqlEvaluation.parseSecondDuration(preAggregation.refreshKey.updateWindow),
preAggregationStartEndQueries:
(preAggregation.partitionGranularity || references.timeDimensions[0]?.granularity) &&
- this.refreshRangeQuery().preAggregationStartEndQueries(cube, preAggregation),
+ this.refreshRangeQuery(cube).preAggregationStartEndQueries(cube, preAggregation),
matchedTimeDimensionDateRange:
preAggregation.partitionGranularity && (
matchedTimeDimension && matchedTimeDimension.boundaryDateRangeFormatted() ||
@@ -1041,12 +1043,15 @@ export class PreAggregations {
return { preAggregations, result };
}
- refreshRangeQuery() {
- return this.query.newSubQuery({
- rowLimit: null,
- offset: null,
- preAggregationQuery: true,
- });
+ refreshRangeQuery(cube) {
+ return this.query.newSubQueryForCube(
+ cube,
+ {
+ rowLimit: null,
+ offset: null,
+ preAggregationQuery: true,
+ }
+ );
}
originalSqlPreAggregationQuery(cube, aggregation) {
diff --git a/packages/cubejs-schema-compiler/src/compiler/CubeEvaluator.ts b/packages/cubejs-schema-compiler/src/compiler/CubeEvaluator.ts
index 9358e2eae1bde..63c6c97b8c10f 100644
--- a/packages/cubejs-schema-compiler/src/compiler/CubeEvaluator.ts
+++ b/packages/cubejs-schema-compiler/src/compiler/CubeEvaluator.ts
@@ -273,6 +273,12 @@ export class CubeEvaluator extends CubeSymbols {
preAggregation.refreshRangeEnd = preAggregation.buildRangeEnd;
delete preAggregation.buildRangeEnd;
}
+
+ if (preAggregation.outputColumnTypes) {
+ preAggregation.outputColumnTypes.forEach(column => {
+ column.name = this.evaluateReferences(cube.name, column.member, { originalSorting: true });
+ });
+ }
}
}
}
diff --git a/packages/cubejs-schema-compiler/src/compiler/CubeValidator.ts b/packages/cubejs-schema-compiler/src/compiler/CubeValidator.ts
index c48825440006e..3e79893ea4f8a 100644
--- a/packages/cubejs-schema-compiler/src/compiler/CubeValidator.ts
+++ b/packages/cubejs-schema-compiler/src/compiler/CubeValidator.ts
@@ -248,6 +248,10 @@ const BasePreAggregationWithoutPartitionGranularity = {
},
readOnly: Joi.boolean().strict(),
streamOffset: Joi.any().valid('earliest', 'latest'),
+ outputColumnTypes: Joi.array().items(Joi.object().keys({
+ member: Joi.func().required(),
+ type: Joi.string().required()
+ })),
};
const BasePreAggregation = {
@@ -390,6 +394,7 @@ const RollUpSchema = condition(
measureReferences: Joi.func(),
dimensionReferences: Joi.func(),
segmentReferences: Joi.func(),
+ uniqueKeyColumns: Joi.array().items(Joi.string()),
}),
condition(
(s) => defined(s.timeDimension),
@@ -402,6 +407,7 @@ const RollUpSchema = condition(
measures: Joi.func(),
dimensions: Joi.func(),
segments: Joi.func(),
+ uniqueKeyColumns: Joi.array().items(Joi.string()),
}),
// Rollup with multiple time dimensions
inherit(BasePreAggregation, {
@@ -414,6 +420,7 @@ const RollUpSchema = condition(
measures: Joi.func(),
dimensions: Joi.func(),
segments: Joi.func(),
+ uniqueKeyColumns: Joi.array().items(Joi.string()),
})
)
),
@@ -422,14 +429,16 @@ const RollUpSchema = condition(
type: Joi.any().valid('rollup').required(),
measureReferences: Joi.func(),
dimensionReferences: Joi.func(),
- segmentReferences: Joi.func()
+ segmentReferences: Joi.func(),
+ uniqueKeyColumns: Joi.array().items(Joi.string()),
}),
// Rollup without References postfix
inherit(BasePreAggregation, {
type: Joi.any().valid('rollup').required(),
measures: Joi.func(),
dimensions: Joi.func(),
- segments: Joi.func()
+ segments: Joi.func(),
+ uniqueKeyColumns: Joi.array().items(Joi.string()),
})
)
);
diff --git a/packages/cubejs-schema-compiler/src/compiler/transpilers/CubePropContextTranspiler.ts b/packages/cubejs-schema-compiler/src/compiler/transpilers/CubePropContextTranspiler.ts
index dbfa432727903..f55c71dca29c2 100644
--- a/packages/cubejs-schema-compiler/src/compiler/transpilers/CubePropContextTranspiler.ts
+++ b/packages/cubejs-schema-compiler/src/compiler/transpilers/CubePropContextTranspiler.ts
@@ -19,6 +19,7 @@ export const transpiledFieldsPatterns: Array = [
/^(preAggregations|pre_aggregations)\.[_a-zA-Z][_a-zA-Z0-9]*\.indexes\.[_a-zA-Z][_a-zA-Z0-9]*\.columns$/,
/^(preAggregations|pre_aggregations)\.[_a-zA-Z][_a-zA-Z0-9]*\.(timeDimensionReference|timeDimension|time_dimension|segments|dimensions|measures|rollups|segmentReferences|dimensionReferences|measureReferences|rollupReferences)$/,
/^(preAggregations|pre_aggregations)\.[_a-zA-Z][_a-zA-Z0-9]*\.(timeDimensions|time_dimensions)\.\d+\.dimension$/,
+ /^(preAggregations|pre_aggregations)\.[_a-zA-Z][_a-zA-Z0-9]*\.(outputColumnTypes|output_column_types)\.\d+\.member$/,
/^contextMembers$/,
/^includes$/,
/^excludes$/,
diff --git a/packages/cubejs-schema-compiler/test/integration/postgres/pre-aggregations.test.ts b/packages/cubejs-schema-compiler/test/integration/postgres/pre-aggregations.test.ts
index fbc7bcf8d30f6..eb24efc8ff4b3 100644
--- a/packages/cubejs-schema-compiler/test/integration/postgres/pre-aggregations.test.ts
+++ b/packages/cubejs-schema-compiler/test/integration/postgres/pre-aggregations.test.ts
@@ -490,6 +490,8 @@ describe('PreAggregations', () => {
dimensions: [id, source],
timeDimension: createdAt,
granularity: 'day',
+ build_range_start: { sql: "SELECT DATE_SUB(NOW(), interval '96 hour')" },
+ build_range_end: { sql: "SELECT NOW()" },
partitionGranularity: 'day'
}
}
diff --git a/packages/cubejs-testing-shared/package.json b/packages/cubejs-testing-shared/package.json
index dc0431853fe67..4a64298c95530 100644
--- a/packages/cubejs-testing-shared/package.json
+++ b/packages/cubejs-testing-shared/package.json
@@ -24,9 +24,10 @@
"@cubejs-backend/query-orchestrator": "^0.35.78",
"@cubejs-backend/schema-compiler": "^0.35.80",
"@cubejs-backend/shared": "^0.35.67",
+ "@testcontainers/kafka": "~10.13.0",
"dedent": "^0.7.0",
"node-fetch": "^2.6.7",
- "testcontainers": "^10.10.4"
+ "testcontainers": "^10.13.0"
},
"devDependencies": {
"@cubejs-backend/linter": "^0.35.0",
diff --git a/packages/cubejs-testing-shared/src/db-container-runners/db-runner.abstract.ts b/packages/cubejs-testing-shared/src/db-container-runners/db-runner.abstract.ts
index 4c30b36484083..9ef781b8db039 100644
--- a/packages/cubejs-testing-shared/src/db-container-runners/db-runner.abstract.ts
+++ b/packages/cubejs-testing-shared/src/db-container-runners/db-runner.abstract.ts
@@ -1,3 +1,5 @@
+import { StartedNetwork } from 'testcontainers';
+
export interface ContainerVolumeDefinition {
source: string,
target: string,
@@ -5,6 +7,7 @@ export interface ContainerVolumeDefinition {
}
export interface DBRunnerContainerOptions {
+ network?: StartedNetwork,
volumes?: ContainerVolumeDefinition[],
version?: string,
}
diff --git a/packages/cubejs-testing-shared/src/db-container-runners/index.ts b/packages/cubejs-testing-shared/src/db-container-runners/index.ts
index 5a9194d278e2c..0e2e5a6265c24 100644
--- a/packages/cubejs-testing-shared/src/db-container-runners/index.ts
+++ b/packages/cubejs-testing-shared/src/db-container-runners/index.ts
@@ -9,3 +9,5 @@ export * from './prestodb';
export * from './mssql';
export * from './trino';
export * from './oracle';
+export * from './kafka';
+export * from './ksql';
diff --git a/packages/cubejs-testing-shared/src/db-container-runners/kafka.ts b/packages/cubejs-testing-shared/src/db-container-runners/kafka.ts
new file mode 100644
index 0000000000000..8ee8d8558a393
--- /dev/null
+++ b/packages/cubejs-testing-shared/src/db-container-runners/kafka.ts
@@ -0,0 +1,31 @@
+import { KafkaContainer } from '@testcontainers/kafka';
+import { DbRunnerAbstract, DBRunnerContainerOptions } from './db-runner.abstract';
+
+export class KafkaDBRunner extends DbRunnerAbstract {
+ public static startContainer(options: DBRunnerContainerOptions) {
+ const version = process.env.TEST_KAFKA_VERSION || options.version || '7.6.0';
+
+ const container = new KafkaContainer(`confluentinc/cp-kafka:${version}`)
+ .withKraft()
+ .withEnvironment({
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '1',
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: '1',
+ KAFKA_NUM_PARTITIONS: '1',
+ KAFKA_DEFAULT_REPLICATION_FACTOR: '1',
+ })
+ .withExposedPorts(9093)
+ .withStartupTimeout(10 * 1000);
+
+ if (options.network) {
+ container.withNetwork(options.network);
+ container.withNetworkAliases('kafka');
+ }
+
+ if (options.volumes) {
+ const binds = options.volumes.map(v => ({ source: v.source, target: v.target, mode: v.bindMode }));
+ container.withBindMounts(binds);
+ }
+
+ return container.start();
+ }
+}
diff --git a/packages/cubejs-testing-shared/src/db-container-runners/ksql.ts b/packages/cubejs-testing-shared/src/db-container-runners/ksql.ts
new file mode 100644
index 0000000000000..70f0c083d7a5a
--- /dev/null
+++ b/packages/cubejs-testing-shared/src/db-container-runners/ksql.ts
@@ -0,0 +1,82 @@
+import fetch from 'node-fetch';
+import { GenericContainer, StartedTestContainer } from 'testcontainers';
+import { pausePromise } from '@cubejs-backend/shared';
+import { DbRunnerAbstract, DBRunnerContainerOptions } from './db-runner.abstract';
+
+export class KsqlDBRunner extends DbRunnerAbstract {
+ public static startContainer(options: DBRunnerContainerOptions) {
+ const version = process.env.TEST_KSQL_VERSION || options.version || '7.6.0';
+
+ const bootstrapServers = 'kafka:9092';
+ const container = new GenericContainer(`confluentinc/cp-ksqldb-server:${version}`)
+ .withEnvironment({
+ KSQL_BOOTSTRAP_SERVERS: bootstrapServers,
+ KSQL_KSQL_STREAMS_BOOTSTRAP_SERVERS: bootstrapServers,
+ KSQL_KSQL_SERVICE_ID: 'service-id',
+ })
+ .withExposedPorts(8088)
+ .withStartupTimeout(30 * 1000);
+
+ if (options.network) {
+ container.withNetwork(options.network);
+ container.withNetworkAliases('ksql');
+ }
+
+ if (options.volumes) {
+ const binds = options.volumes.map(v => ({ source: v.source, target: v.target, mode: v.bindMode }));
+ container.withBindMounts(binds);
+ }
+
+ return container.start();
+ }
+
+ public static async loadData(db: StartedTestContainer) {
+ const ksqlUrl = `http://${db.getHost()}:${db.getMappedPort(8088)}`;
+
+ let attempts = 0;
+ while (attempts < 10) {
+ const res = await fetch(`${ksqlUrl}/ksql`, {
+ method: 'POST',
+ headers: { Accept: 'application/json' },
+ body: JSON.stringify({
+ ksql: 'LIST STREAMS;',
+ streamsProperties: {}
+ })
+ });
+
+ const body = await res.json();
+ if (body.message !== 'KSQL is not yet ready to serve requests.') {
+ console.log('KSQL ready');
+ break;
+ }
+ console.log('KSQL not ready yet');
+ attempts++;
+
+ await pausePromise(300);
+ }
+
+ const resCreateStream = await fetch(`${ksqlUrl}/ksql`, {
+ method: 'POST',
+ headers: { Accept: 'application/json' },
+ body: JSON.stringify({
+ ksql: "CREATE OR REPLACE STREAM REQUESTS (ID STRING, TIMESTAMP TIMESTAMP, TENANT_ID INTEGER, REQUEST_ID STRING) WITH (KAFKA_TOPIC = 'REQUESTS', KEY_FORMAT = 'JSON', PARTITIONS = 1, REPLICAS = 1, VALUE_FORMAT = 'JSON');",
+ streamsProperties: {}
+ })
+ });
+
+ console.log('KSQL CREATE STREAM', await resCreateStream.json());
+
+ const yesterday = new Date(Date.now() - 24 * 60 * 60 * 1000).toJSON();
+ const today = new Date(Date.now() - 1000).toJSON();
+ const resInsertYesterday = await fetch(`${ksqlUrl}/ksql`, {
+ method: 'POST',
+ headers: { Accept: 'application/json' },
+ body: JSON.stringify({
+ ksql: `INSERT INTO REQUESTS VALUES ('1', '${yesterday}', 1, 'req-stream-1');INSERT INTO REQUESTS VALUES ('1', '${today}', 1, 'req-stream-2');`,
+ streamsProperties: {}
+ })
+ });
+
+ console.log('KSQL INSERT', await resInsertYesterday.json());
+ }
+}
diff --git a/packages/cubejs-testing/birdbox-fixtures/lambda/cube.js b/packages/cubejs-testing/birdbox-fixtures/lambda/cube.js
index 8044afc431111..813926a2d547c 100644
--- a/packages/cubejs-testing/birdbox-fixtures/lambda/cube.js
+++ b/packages/cubejs-testing/birdbox-fixtures/lambda/cube.js
@@ -1,3 +1,6 @@
+const PostgresDriver = require("@cubejs-backend/postgres-driver");
+const KsqlDriver = require("@cubejs-backend/ksql-driver");
+
module.exports = {
orchestratorOptions: {
preAggregationsOptions: {
@@ -7,4 +10,22 @@ module.exports = {
contextToApiScopes: async () => new Promise((resolve) => {
resolve(['graphql', 'meta', 'data', 'jobs']);
}),
+ dbType: ({ dataSource }) => {
+ if (dataSource === 'default') {
+ return 'postgres';
+ }
+
+ return dataSource || 'postgres';
+ },
+ driverFactory: async ({ dataSource }) => {
+ if (dataSource === "ksql") {
+ return new KsqlDriver({
+ url: process.env.KSQL_URL,
+ kafkaHost: process.env.KSQL_KAFKA_HOST,
+ kafkaUseSsl: false,
+ });
+ }
+
+ return new PostgresDriver();
+ }
};
diff --git a/packages/cubejs-testing/birdbox-fixtures/lambda/schema/Requests.js b/packages/cubejs-testing/birdbox-fixtures/lambda/schema/Requests.js
new file mode 100644
index 0000000000000..bb88ba71c9d6e
--- /dev/null
+++ b/packages/cubejs-testing/birdbox-fixtures/lambda/schema/Requests.js
@@ -0,0 +1,96 @@
+cube("Requests", {
+ sql: `select 1 as tenant_id, 1 as deployment_id, 'req-1' as request_id, (NOW() - INTERVAL '1 day')::timestamp as timestamp
+ UNION ALL
+ select 2 as tenant_id, 1 as deployment_id, 'req-2' as request_id, (NOW() - INTERVAL '2 day')::timestamp as timestamp
+ `,
+ data_source: "postgres",
+ measures: {
+ count: {
+ type: "count",
+ },
+ },
+ dimensions: {
+ tenant_id: {
+ sql: `tenant_id`,
+ type: "number",
+ primaryKey: true,
+ },
+ request_id: {
+ sql: `request_id`,
+ type: "string",
+ primaryKey: true,
+ },
+ timestamp: {
+ sql: `timestamp`,
+ type: "time",
+ },
+ },
+ pre_aggregations: {
+ batch_streaming_lambda: {
+ type: `rollup_lambda`,
+ rollups: [batch, RequestsStream.stream],
+ },
+
+ batch: {
+ external: true,
+ type: "rollup",
+ measures: [count],
+ dimensions: [tenant_id, request_id, timestamp],
+ granularity: "day",
+ time_dimension: Requests.timestamp,
+ partition_granularity: "day",
+ build_range_start: { sql: "SELECT NOW() - INTERVAL '10 day'" },
+ build_range_end: { sql: "SELECT NOW()" },
+ },
+ },
+});
+
+cube("RequestsStream", {
+ dataSource: "ksql",
+
+ sql: `SELECT * FROM REQUESTS`,
+
+ measures: {
+ count: {
+ type: "count",
+ },
+ },
+ dimensions: {
+ tenant_id: {
+ sql: `TENANT_ID`,
+ type: "number",
+ primaryKey: true,
+ },
+ request_id: {
+ sql: `REQUEST_ID`,
+ type: "string",
+ primaryKey: true,
+ },
+ timestamp: {
+ sql: `TIMESTAMP`,
+ type: "time",
+ },
+ },
+ preAggregations: {
+ stream: {
+ streamOffset: "earliest",
+ readOnly: true,
+ external: true,
+ type: `rollup`,
+ measures: [count],
+ dimensions: [tenant_id, request_id, timestamp],
+ time_dimension: RequestsStream.timestamp,
+ granularity: "day",
+ unique_key_columns: ["tenant_id", "request_id"],
+ partition_granularity: "day",
+ build_range_start: { sql: "SELECT DATE_SUB(NOW(), interval '96 hour')" },
+ build_range_end: { sql: "SELECT NOW()" },
+ outputColumnTypes: [
+ { member: tenant_id, type: "int" },
+ { member: request_id, type: "text" },
+ { member: timestamp, type: "timestamp" },
+ { member: count, type: "int" },
+ ],
+ },
+ },
+});
diff --git a/packages/cubejs-testing/package.json b/packages/cubejs-testing/package.json
index 87f88198519a1..df8b463ed7989 100644
--- a/packages/cubejs-testing/package.json
+++ b/packages/cubejs-testing/package.json
@@ -91,6 +91,7 @@
"dependencies": {
"@cubejs-backend/cubestore-driver": "^0.35.78",
"@cubejs-backend/dotenv": "^9.0.2",
+ "@cubejs-backend/ksql-driver": "^0.35.80",
"@cubejs-backend/postgres-driver": "^0.35.80",
"@cubejs-backend/query-orchestrator": "^0.35.78",
"@cubejs-backend/schema-compiler": "^0.35.80",
@@ -102,7 +103,7 @@
"http-proxy": "^1.18.1",
"node-fetch": "^2.6.1",
"ramda": "^0.27.2",
- "testcontainers": "^10.10.4",
+ "testcontainers": "^10.13.0",
"yargs": "^17.3.1"
},
"devDependencies": {
diff --git a/packages/cubejs-testing/test/smoke-lambda.test.ts b/packages/cubejs-testing/test/smoke-lambda.test.ts
index 3687ab25e5194..05369fec8197e 100644
--- a/packages/cubejs-testing/test/smoke-lambda.test.ts
+++ b/packages/cubejs-testing/test/smoke-lambda.test.ts
@@ -1,8 +1,8 @@
import R from 'ramda';
-import { StartedTestContainer } from 'testcontainers';
+import { StartedTestContainer, Network, StartedNetwork } from 'testcontainers';
import { pausePromise } from '@cubejs-backend/shared';
import fetch from 'node-fetch';
-import { PostgresDBRunner } from '@cubejs-backend/testing-shared';
+import { PostgresDBRunner, KafkaDBRunner, KsqlDBRunner } from '@cubejs-backend/testing-shared';
import cubejs, { CubeApi, Query } from '@cubejs-client/core';
// eslint-disable-next-line import/no-extraneous-dependencies
import { afterAll, beforeAll, expect, jest } from '@jest/globals';
@@ -30,6 +30,9 @@ describe('lambda', () => {
jest.setTimeout(60 * 5 * 1000);
let db: StartedTestContainer;
+ let network: StartedNetwork;
+ let dbKafka: StartedTestContainer;
+ let dbKsql: StartedTestContainer;
let birdbox: BirdBox;
let client: CubeApi;
let postgres: any;
@@ -38,6 +41,13 @@ describe('lambda', () => {
beforeAll(async () => {
db = await PostgresDBRunner.startContainer({});
await PostgresDBRunner.loadEcom(db);
+
+ network = await new Network().start();
+ dbKafka = await KafkaDBRunner.startContainer({ network });
+ dbKsql = await KsqlDBRunner.startContainer({ network });
+
+ await KsqlDBRunner.loadData(dbKsql);
+
birdbox = await getBirdbox(
'postgres',
{
@@ -50,6 +60,8 @@ describe('lambda', () => {
CUBEJS_DB_PASS: 'test',
CUBEJS_ROLLUP_ONLY: 'true',
CUBEJS_REFRESH_WORKER: 'false',
+ KSQL_URL: `http://${dbKsql.getHost()}:${dbKsql.getMappedPort(8088)}`,
+ KSQL_KAFKA_HOST: `${dbKafka.getHost()}:${dbKafka.getMappedPort(9093)}`,
},
{
schemaDir: 'lambda/schema',
@@ -79,9 +91,41 @@ describe('lambda', () => {
afterAll(async () => {
await birdbox.stop();
await db.stop();
+ await dbKafka.stop();
+ await dbKsql.stop();
+ await network.stop();
await cubestore.release();
}, JEST_AFTER_ALL_DEFAULT_TIMEOUT);
+ test('Query lambda with ksql', async () => {
+ const query: Query = {
+ measures: ['Requests.count'],
+ dimensions: ['Requests.tenant_id', 'Requests.request_id'],
+ timeDimensions: [
+ {
+ dimension: 'Requests.timestamp',
+ granularity: 'day'
+ }
+ ],
+ };
+ // First call to trigger the pre-aggregation build
+ await client.load(query);
+ // We have to wait for cubestore to consume the data from Kafka. There is no way to know when it's done right now.
+ await pausePromise(5000);
+
+ const response = await client.load(query);
+
+ // @ts-ignore
+ expect(response.loadResponse.results[0].data.map(i => i['Requests.request_id'])).toEqual([
+ 'req-2',
+ 'req-1',
+ 'req-stream-2'
+ ]);
+
+ // @ts-ignore
+ expect(response.loadResponse.results[0].data.length).toEqual(3);
+ });
+
test('query', async () => {
const query: Query = {
measures: ['Orders.count'],
diff --git a/yarn.lock b/yarn.lock
index 6af9573d2f781..78642ca337f0a 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -4378,6 +4378,11 @@
resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.54.0.tgz#4fab9a2ff7860082c304f750e94acd644cf984cf"
integrity sha512-ut5V+D+fOoWPgGGNj83GGjnntO39xDy6DWxO0wb7Jp3DcMX0TfIqdzHF85VTQkerdyGmuuMD9AKAo5KiNlf/AQ==
+"@fastify/busboy@^2.0.0":
+ version "2.1.1"
+ resolved "https://registry.yarnpkg.com/@fastify/busboy/-/busboy-2.1.1.tgz#b9da6a878a371829a0502c9b6c1c143ef6663f4d"
+ integrity sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==
+
"@formatjs/ecma402-abstract@1.18.2":
version "1.18.2"
resolved "https://registry.yarnpkg.com/@formatjs/ecma402-abstract/-/ecma402-abstract-1.18.2.tgz#bf103712a406874eb1e387858d5be2371ab3aa14"
@@ -8228,6 +8233,13 @@
resolved "https://registry.yarnpkg.com/@tediousjs/connection-string/-/connection-string-0.5.0.tgz#9b3d858c040aac6bdf5584bf45370cef5b6522b4"
integrity sha512-7qSgZbincDDDFyRweCIEvZULFAw5iz/DeunhvuxpL31nfntX3P4Yd4HkHBRg9H8CdqY1e5WFN1PZIz/REL9MVQ==
+"@testcontainers/kafka@~10.13.0":
+ version "10.13.0"
+ resolved "https://registry.yarnpkg.com/@testcontainers/kafka/-/kafka-10.13.0.tgz#c8498d1534e38efc9269eaaaea65acbab5c9a0e4"
+ integrity sha512-r7glnNVUom9GaEH8WknTrB3+gFvtm5lDjgwcuLgGLRpZEKUHX+H2OP8JulA0sgfwvDEcdZHLPah+Aoc0OkuAdg==
+ dependencies:
+ testcontainers "^10.13.0"
+
"@tootallnate/once@1":
version "1.1.2"
resolved "https://registry.yarnpkg.com/@tootallnate/once/-/once-1.1.2.tgz#ccb91445360179a04e7fe6aff78c00ffc1eeaf82"
@@ -10579,6 +10591,19 @@ archiver-utils@^2.1.0:
normalize-path "^3.0.0"
readable-stream "^2.0.0"
+archiver-utils@^5.0.0, archiver-utils@^5.0.2:
+ version "5.0.2"
+ resolved "https://registry.yarnpkg.com/archiver-utils/-/archiver-utils-5.0.2.tgz#63bc719d951803efc72cf961a56ef810760dd14d"
+ integrity sha512-wuLJMmIBQYCsGZgYLTy5FIB2pF6Lfb6cXMSF8Qywwk3t20zWnAi7zLcQFdKQmIB8wyZpY5ER38x08GbwtR2cLA==
+ dependencies:
+ glob "^10.0.0"
+ graceful-fs "^4.2.0"
+ is-stream "^2.0.1"
+ lazystream "^1.0.0"
+ lodash "^4.17.15"
+ normalize-path "^3.0.0"
+ readable-stream "^4.0.0"
+
archiver@^5.3.2:
version "5.3.2"
resolved "https://registry.yarnpkg.com/archiver/-/archiver-5.3.2.tgz#99991d5957e53bd0303a392979276ac4ddccf3b0"
@@ -10592,6 +10617,19 @@ archiver@^5.3.2:
tar-stream "^2.2.0"
zip-stream "^4.1.0"
+archiver@^7.0.1:
+ version "7.0.1"
+ resolved "https://registry.yarnpkg.com/archiver/-/archiver-7.0.1.tgz#c9d91c350362040b8927379c7aa69c0655122f61"
+ integrity sha512-ZcbTaIqJOfCc03QwD468Unz/5Ir8ATtvAHsK+FdXbDIbGfihqh9mrvdcYunQzqn4HrvWWaFyaxJhGZagaJJpPQ==
+ dependencies:
+ archiver-utils "^5.0.2"
+ async "^3.2.4"
+ buffer-crc32 "^1.0.0"
+ readable-stream "^4.0.0"
+ readdir-glob "^1.1.2"
+ tar-stream "^3.0.0"
+ zip-stream "^6.0.1"
+
are-we-there-yet@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-2.0.0.tgz#372e0e7bd279d8e94c653aaa1f67200884bf3e1c"
@@ -11578,6 +11616,11 @@ buffer-crc32@^0.2.1, buffer-crc32@^0.2.13, buffer-crc32@~0.2.3:
resolved "https://registry.yarnpkg.com/buffer-crc32/-/buffer-crc32-0.2.13.tgz#0d333e3f00eac50aa1454abd30ef8c2a5d9a7242"
integrity sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI=
+buffer-crc32@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/buffer-crc32/-/buffer-crc32-1.0.0.tgz#a10993b9055081d55304bd9feb4a072de179f405"
+ integrity sha512-Db1SbgBS/fg/392AblrMJk97KggmvYhr4pB5ZIMTWtaivCPMWLkmb7m21cJvpvgK+J3nsU2CmmixNBZx4vFj/w==
+
buffer-equal-constant-time@1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz#f8e71132f7ffe6e01a5c9697a4c6f3e48d5cc819"
@@ -12555,6 +12598,17 @@ compress-commons@^4.1.0:
normalize-path "^3.0.0"
readable-stream "^3.6.0"
+compress-commons@^6.0.2:
+ version "6.0.2"
+ resolved "https://registry.yarnpkg.com/compress-commons/-/compress-commons-6.0.2.tgz#26d31251a66b9d6ba23a84064ecd3a6a71d2609e"
+ integrity sha512-6FqVXeETqWPoGcfzrXb37E50NP0LXT8kAMu5ooZayhWWdgEY4lBEEcbQNXtkuKQsGduxiIcI4gOTsxTmuq/bSg==
+ dependencies:
+ crc-32 "^1.2.0"
+ crc32-stream "^6.0.0"
+ is-stream "^2.0.1"
+ normalize-path "^3.0.0"
+ readable-stream "^4.0.0"
+
compressible@^2.0.12, compressible@~2.0.16:
version "2.0.18"
resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.18.tgz#af53cca6b070d4c3c0750fbd77286a6d7cc46fba"
@@ -12936,6 +12990,14 @@ crc32-stream@^4.0.2:
crc-32 "^1.2.0"
readable-stream "^3.4.0"
+crc32-stream@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.yarnpkg.com/crc32-stream/-/crc32-stream-6.0.0.tgz#8529a3868f8b27abb915f6c3617c0fadedbf9430"
+ integrity sha512-piICUB6ei4IlTv1+653yq5+KoqfBYmj9bw6LqXoOneTMDXk5nM1qt12mFW1caG3LlJXEKW1Bp0WggEmIfQB34g==
+ dependencies:
+ crc-32 "^1.2.0"
+ readable-stream "^4.0.0"
+
create-react-class@^15.5.3:
version "15.7.0"
resolved "https://registry.yarnpkg.com/create-react-class/-/create-react-class-15.7.0.tgz#7499d7ca2e69bb51d13faf59bd04f0c65a1d6c1e"
@@ -16546,6 +16608,18 @@ glob@7.1.7:
once "^1.3.0"
path-is-absolute "^1.0.0"
+glob@^10.0.0:
+ version "10.4.5"
+ resolved "https://registry.yarnpkg.com/glob/-/glob-10.4.5.tgz#f4d9f0b90ffdbab09c9d77f5f29b4262517b0956"
+ integrity sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==
+ dependencies:
+ foreground-child "^3.1.0"
+ jackspeak "^3.1.2"
+ minimatch "^9.0.4"
+ minipass "^7.1.2"
+ package-json-from-dist "^1.0.0"
+ path-scurry "^1.11.1"
+
glob@^10.2.2, glob@^10.3.10, glob@^10.3.3:
version "10.3.10"
resolved "https://registry.yarnpkg.com/glob/-/glob-10.3.10.tgz#0351ebb809fd187fe421ab96af83d3a70715df4b"
@@ -18271,7 +18345,7 @@ is-stream@^1.0.1, is-stream@^1.1.0:
resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44"
integrity sha1-EtSj3U5o4Lec6428hBc66A2RykQ=
-is-stream@^2.0.0:
+is-stream@^2.0.0, is-stream@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.1.tgz#fac1e3d53b97ad5a9d0ae9cef2389f5810a5c077"
integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==
@@ -18512,6 +18586,15 @@ jackspeak@^2.3.5:
optionalDependencies:
"@pkgjs/parseargs" "^0.11.0"
+jackspeak@^3.1.2:
+ version "3.4.3"
+ resolved "https://registry.yarnpkg.com/jackspeak/-/jackspeak-3.4.3.tgz#8833a9d89ab4acde6188942bd1c53b6390ed5a8a"
+ integrity sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==
+ dependencies:
+ "@isaacs/cliui" "^8.0.2"
+ optionalDependencies:
+ "@pkgjs/parseargs" "^0.11.0"
+
java@^0.14, java@^0.14.0:
version "0.14.0"
resolved "https://registry.yarnpkg.com/java/-/java-0.14.0.tgz#28528f462aa23cca7b99e07fbc3130460ff70493"
@@ -20133,6 +20216,11 @@ lru-cache@^10.0.1:
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.1.0.tgz#2098d41c2dc56500e6c88584aa656c84de7d0484"
integrity sha512-/1clY/ui8CzjKFyjdvwPWJUYKiFVXG2I2cY0ssG7h4+hwk+XOIX7ZSG9Q7TW8TW3Kp3BUSqgFWBLgL4PJ+Blag==
+lru-cache@^10.2.0:
+ version "10.4.3"
+ resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.4.3.tgz#410fc8a17b70e598013df257c2446b7f3383f119"
+ integrity sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==
+
lru-cache@^4.0.1, lru-cache@^4.1.2, lru-cache@^4.1.3, lru-cache@^4.1.5:
version "4.1.5"
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.1.5.tgz#8bbe50ea85bed59bc9e33dcab8235ee9bcf443cd"
@@ -20657,6 +20745,13 @@ minimatch@^9.0.1:
dependencies:
brace-expansion "^2.0.1"
+minimatch@^9.0.4:
+ version "9.0.5"
+ resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5"
+ integrity sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==
+ dependencies:
+ brace-expansion "^2.0.1"
+
minimist-options@4.1.0, minimist-options@^4.0.2:
version "4.1.0"
resolved "https://registry.yarnpkg.com/minimist-options/-/minimist-options-4.1.0.tgz#c0655713c53a8a2ebd77ffa247d342c40f010619"
@@ -20774,6 +20869,11 @@ minipass@^3.1.6:
resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.0.4.tgz#dbce03740f50a4786ba994c1fb908844d27b038c"
integrity sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==
+minipass@^7.1.2:
+ version "7.1.2"
+ resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707"
+ integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==
+
minizlib@^1.3.3:
version "1.3.3"
resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.3.3.tgz#2290de96818a34c29551c8a8d301216bd65a861d"
@@ -22127,6 +22227,11 @@ p-waterfall@^2.1.1:
dependencies:
p-reduce "^2.0.0"
+package-json-from-dist@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.0.tgz#e501cd3094b278495eb4258d4c9f6d5ac3019f00"
+ integrity sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw==
+
package-json@^6.3.0:
version "6.5.0"
resolved "https://registry.yarnpkg.com/package-json/-/package-json-6.5.0.tgz#6feedaca35e75725876d0b0e64974697fed145b0"
@@ -22406,6 +22511,14 @@ path-scurry@^1.10.1:
lru-cache "^9.1.1 || ^10.0.0"
minipass "^5.0.0 || ^6.0.2 || ^7.0.0"
+path-scurry@^1.11.1:
+ version "1.11.1"
+ resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.11.1.tgz#7960a668888594a0720b12a911d1a742ab9f11d2"
+ integrity sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==
+ dependencies:
+ lru-cache "^10.2.0"
+ minipass "^5.0.0 || ^6.0.2 || ^7.0.0"
+
path-to-regexp@0.1.7:
version "0.1.7"
resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c"
@@ -24824,7 +24937,7 @@ readable-stream@3, readable-stream@^3.0.0, readable-stream@^3.0.2, readable-stre
string_decoder "^1.1.1"
util-deprecate "^1.0.1"
-readable-stream@^4.2.0:
+readable-stream@^4.0.0, readable-stream@^4.2.0:
version "4.5.2"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-4.5.2.tgz#9e7fc4c45099baeed934bff6eb97ba6cf2729e09"
integrity sha512-yjavECdqeZ3GLXNgRXgeQEdz9fvDDkNKyHnbHRFtOr7/LcfgBcmct7t/ET+HaCTqfh06OzoAxrkN/IfjJBVe+g==
@@ -27177,7 +27290,7 @@ tar-stream@^2.0.0, tar-stream@^2.1.4, tar-stream@^2.2.0:
inherits "^2.0.3"
readable-stream "^3.1.1"
-tar-stream@^3.1.5:
+tar-stream@^3.0.0, tar-stream@^3.1.5:
version "3.1.7"
resolved "https://registry.yarnpkg.com/tar-stream/-/tar-stream-3.1.7.tgz#24b3fb5eabada19fe7338ed6d26e5f7c482e792b"
integrity sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==
@@ -27406,6 +27519,27 @@ testcontainers@^10.10.4:
tar-fs "^3.0.6"
tmp "^0.2.3"
+testcontainers@^10.13.0:
+ version "10.13.0"
+ resolved "https://registry.yarnpkg.com/testcontainers/-/testcontainers-10.13.0.tgz#cc70601db4f7a95d8a3ad669613450bd56b022ee"
+ integrity sha512-SDblQvirbJw1ZpenxaAairGtAesw5XMOCHLbRhTTUBJtBkZJGce8Vx/I8lXQxWIM8HRXsg3HILTHGQvYo4x7wQ==
+ dependencies:
+ "@balena/dockerignore" "^1.0.2"
+ "@types/dockerode" "^3.3.29"
+ archiver "^7.0.1"
+ async-lock "^1.4.1"
+ byline "^5.0.0"
+ debug "^4.3.5"
+ docker-compose "^0.24.8"
+ dockerode "^3.3.5"
+ get-port "^5.1.1"
+ proper-lockfile "^4.1.2"
+ properties-reader "^2.3.0"
+ ssh-remote-port-forward "^1.0.4"
+ tar-fs "^3.0.6"
+ tmp "^0.2.3"
+ undici "^5.28.4"
+
text-decoder@^1.1.0:
version "1.1.1"
resolved "https://registry.yarnpkg.com/text-decoder/-/text-decoder-1.1.1.tgz#5df9c224cebac4a7977720b9f083f9efa1aefde8"
@@ -28037,6 +28171,13 @@ undici-types@~5.26.4:
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==
+undici@^5.28.4:
+ version "5.28.4"
+ resolved "https://registry.yarnpkg.com/undici/-/undici-5.28.4.tgz#6b280408edb6a1a604a9b20340f45b422e373068"
+ integrity sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==
+ dependencies:
+ "@fastify/busboy" "^2.0.0"
+
unescape@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/unescape/-/unescape-1.0.1.tgz#956e430f61cad8a4d57d82c518f5e6cc5d0dda96"
@@ -29574,3 +29715,12 @@ zip-stream@^4.1.0:
archiver-utils "^2.1.0"
compress-commons "^4.1.0"
readable-stream "^3.6.0"
+
+zip-stream@^6.0.1:
+ version "6.0.1"
+ resolved "https://registry.yarnpkg.com/zip-stream/-/zip-stream-6.0.1.tgz#e141b930ed60ccaf5d7fa9c8260e0d1748a2bbfb"
+ integrity sha512-zK7YHHz4ZXpW89AHXUPbQVGKI7uvkd3hzusTdotCg1UxyaVtg0zFJSTfW/Dq5f7OBBVnq6cZIaC8Ti4hb6dtCA==
+ dependencies:
+ archiver-utils "^5.0.0"
+ compress-commons "^6.0.2"
+ readable-stream "^4.0.0"
From ebfdb627ea292ca1228ae191af1daeac4ff91540 Mon Sep 17 00:00:00 2001
From: Julio Avila
Date: Wed, 11 Sep 2024 05:50:16 -0600
Subject: [PATCH 017/415] docs: Fix import logic and make example easier to
understand (#8688)
---
.../dynamic/code-reusability-export-and-import.mdx | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/docs/pages/product/data-modeling/dynamic/code-reusability-export-and-import.mdx b/docs/pages/product/data-modeling/dynamic/code-reusability-export-and-import.mdx
index 6cd8d3b20dc45..a5e3933db39bd 100644
--- a/docs/pages/product/data-modeling/dynamic/code-reusability-export-and-import.mdx
+++ b/docs/pages/product/data-modeling/dynamic/code-reusability-export-and-import.mdx
@@ -103,19 +103,20 @@ which is located in a parent directory.
├── README.md
├── cube.js
├── package.json
-└── model
- ├── utils.js
- └── sales
+└── model/
+ ├── shared_utils/
+ │ └── utils.js
+ └── sales/
└── orders.js
```
```javascript
// in model/sales/orders.js
-import { capitalize } from "./schema_utils";
+import { capitalize } from "./shared_utils/utils";
```
```javascript
-// in model/utils.js
+// in model/shared_utils/utils.js
export const capitalize = (s) => s.charAt(0).toUpperCase() + s.slice(1);
```
@@ -124,4 +125,4 @@ export const capitalize = (s) => s.charAt(0).toUpperCase() + s.slice(1);
https://developer.mozilla.org/en-US/docs/web/javascript/reference/statements/export
[mdn-js-es6-import]:
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/import
-[ref-schema-string-time-dims]: /guides/recipes/data-modeling/string-time-dimensions
\ No newline at end of file
+[ref-schema-string-time-dims]: /guides/recipes/data-modeling/string-time-dimensions
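To make the value of the shared module concrete, here is a hypothetical sketch of `capitalize` driving dynamically generated measures. The cube name, table, and status list are invented for illustration, and the `declare` line exists only so the sketch type-checks standalone; Cube provides `cube()` globally in model files.

```typescript
import { capitalize } from "./shared_utils/utils";

// Cube injects `cube()` into model files; declared here only for the sketch.
declare function cube(name: string, definition: object): void;

const statuses = ["processing", "shipped", "completed"];

// e.g. countProcessing, countShipped, countCompleted
const measures: Record<string, object> = {
  count: { type: `count` },
};
for (const status of statuses) {
  measures[`count${capitalize(status)}`] = {
    type: `count`,
    filters: [{ sql: (CUBE: string) => `${CUBE}.status = '${status}'` }],
  };
}

cube(`orders`, {
  sql_table: `public.orders`,
  measures,
});
```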
From db2256d30e6ee22ec7a06412f7d1ed9cb0dc15a4 Mon Sep 17 00:00:00 2001
From: Igor Lukanin
Date: Wed, 11 Sep 2024 14:38:57 +0200
Subject: [PATCH 018/415] docs: Fix DEPRECATION.md
---
DEPRECATION.md | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/DEPRECATION.md b/DEPRECATION.md
index 67d61b8d3f890..f7486c883afa8 100644
--- a/DEPRECATION.md
+++ b/DEPRECATION.md
@@ -60,6 +60,7 @@ features:
| Deprecated | [Node.js 16](#nodejs-16) | v0.35.0 | |
| Removed | [MySQL-based SQL API](#mysql-based-sql-api) | v0.35.0 | v0.35.0 |
| Removed | [`initApp` hook](#initapp-hook) | v0.35.0 | v0.35.0 |
+| Deprecated | [`/v1/run-scheduled-refresh` REST API endpoint](#v1run-scheduled-refresh-rest-api-endpoint) | v0.35.0 | |
### Node.js 8
@@ -392,4 +393,13 @@ Early prototype of the MySQL-based SQL API is removed in favor of the Postgres-c
**Removed in release: v0.35.0**
-The `initApp` hook is removed as it's not relevant anymore for Docker-based architecture.
\ No newline at end of file
+The `initApp` hook is removed as it's not relevant anymore for Docker-based architecture.
+
+### `/v1/run-scheduled-refresh` REST API endpoint
+
+**Deprecated in release: v0.35.0**
+
+The `/v1/run-scheduled-refresh` REST API endpoint is deprecated as it's no
+longer relevant for the Docker-based architecture. Use the [Orchestration
+API](https://cube.dev/docs/product/apis-integrations/orchestration-api) and
+its `/v1/pre-aggregations/jobs` endpoint instead.
\ No newline at end of file
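A hedged sketch of the replacement call follows; the `action`/`selector` payload is an assumption to verify against the Orchestration API reference, and the host, port, and token are placeholders.

```typescript
// Hypothetical migration sketch: trigger pre-aggregation builds through the
// Orchestration API instead of the deprecated /v1/run-scheduled-refresh.
async function triggerPreAggregationBuilds(): Promise<void> {
  const res = await fetch(
    "http://localhost:4000/cubejs-api/v1/pre-aggregations/jobs",
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: process.env.CUBEJS_API_TOKEN ?? "", // e.g. a signed JWT
      },
      body: JSON.stringify({
        // Payload fields below are assumptions; check the Orchestration API docs.
        action: "post",
        selector: {
          timezones: ["UTC"],
          dataSources: ["default"],
        },
      }),
    },
  );
  console.log(await res.json()); // job identifiers to poll for build status
}
```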
From 64788dea89b0244911518de203929fc5c773cd8f Mon Sep 17 00:00:00 2001
From: Konstantin Burkalev
Date: Thu, 12 Sep 2024 12:27:17 +0300
Subject: [PATCH 019/415] feat(cubestore): Support date_bin function (#8672)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* feat(cubestore): implement date_bin()
* add tests for date_bin udf in cubestore
* remove obsolete stuff
* remove debug logs + rename
* remove debug logs
---
.../cubestore-sql-tests/src/tests.rs | 391 +++++++++++++-----
.../cubestore/src/queryplanner/mod.rs | 1 +
.../cubestore/src/queryplanner/udfs.rs | 238 ++++++++++-
3 files changed, 520 insertions(+), 110 deletions(-)
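`DATE_BIN(interval, source, origin)` assigns `source` to the start of the interval-sized bin that contains it, on a grid of bins anchored at `origin`; timestamps earlier than the origin fall into negative bins. For fixed-length intervals the arithmetic reduces to a floor division, as in this sketch (illustrative TypeScript, not the Rust implementation added to `queryplanner/udfs.rs` below):

```typescript
// Sketch of DATE_BIN for fixed-length intervals (not the cubestore code):
// index = floor((source - origin) / interval), result = origin + index * interval.
function dateBin(intervalMs: number, source: Date, origin: Date): Date {
  const delta = source.getTime() - origin.getTime();
  // Math.floor (not truncation) keeps pre-origin timestamps in the right bin.
  const index = Math.floor(delta / intervalMs);
  return new Date(origin.getTime() + index * intervalMs);
}

const DAY = 24 * 60 * 60 * 1000;
// Mirrors the "10 day" test below: 2024-01-25 is two whole 10-day steps past
// the origin, so it lands in the bin starting 2024-01-21.
console.log(
  dateBin(
    10 * DAY,
    new Date("2024-01-25T01:00:00Z"),
    new Date("2024-01-01T01:00:00Z"),
  ).toISOString(), // 2024-01-21T01:00:00.000Z
);
```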
diff --git a/rust/cubestore/cubestore-sql-tests/src/tests.rs b/rust/cubestore/cubestore-sql-tests/src/tests.rs
index 8d918cd7926c8..7b662d463c4d8 100644
--- a/rust/cubestore/cubestore-sql-tests/src/tests.rs
+++ b/rust/cubestore/cubestore-sql-tests/src/tests.rs
@@ -203,6 +203,7 @@ pub fn sql_tests() -> Vec<(&'static str, TestFn)> {
t("float_index", float_index),
t("float_order", float_order),
t("date_add", date_add),
+ t("date_bin", date_bin),
t("now", now),
t("dump", dump),
t("unsorted_merge_assertion", unsorted_merge_assertion),
@@ -2515,7 +2516,7 @@ async fn create_table_with_location_and_hyperloglog(service: Box<dyn SqlClient>)
.await
.unwrap();
let _ = service
- .exec_query(&format!("CREATE TABLE hll.locations (id int, hll hyperloglog, hll_base hyperloglog) LOCATION {}",
+ .exec_query(&format!("CREATE TABLE hll.locations (id int, hll hyperloglog, hll_base hyperloglog) LOCATION {}",
paths
.into_iter()
.map(|p| format!("'{}'", p.to_string_lossy()))
@@ -2567,7 +2568,7 @@ async fn create_table_with_location_and_hyperloglog_postgress(service: Box<dyn SqlClient>) {
);
}
+async fn date_bin(service: Box<dyn SqlClient>) {
+ let check_fn = |interval, source, origin, expected| {
+ let expected = timestamp_from_string(expected).unwrap();
+ let service = &service;
+ async move {
+ let actual = service
+ .exec_query(&format!(
+ "SELECT DATE_BIN(INTERVAL '{}', CAST('{}' as TIMESTAMP), CAST('{}' as TIMESTAMP))",
+ interval, source, origin
+ ))
+ .await
+ .unwrap();
+ assert_eq!(to_rows(&actual), rows(&[expected]));
+ }
+ };
+
+ // Common dates
+ check_fn(
+ "1 month",
+ "2024-01-21T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "1 month",
+ "2023-11-21T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2023-11-01T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "1 month",
+ "2024-02-21T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2024-02-01T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "2 month",
+ "2024-04-25T01:00:00Z",
+ "2024-01-20T01:00:00Z",
+ "2024-03-20T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "2 month",
+ "2024-04-15T01:00:00Z",
+ "2024-01-20T01:00:00Z",
+ "2024-03-20T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "2 month",
+ "2024-05-25T01:00:00Z",
+ "2024-01-20T01:00:00Z",
+ "2024-05-20T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "2 month",
+ "2024-05-15T01:00:00Z",
+ "2024-01-20T01:00:00Z",
+ "2024-03-20T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "2 month",
+ "2023-11-25T01:00:00Z",
+ "2024-01-20T01:00:00Z",
+ "2023-11-20T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "2 month",
+ "2023-11-15T01:00:00Z",
+ "2024-01-20T01:00:00Z",
+ "2023-09-20T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "10 day",
+ "2024-01-25T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2024-01-21T01:00:00Z",
+ )
+ .await;
+ check_fn(
+ "10 day 2 hour 5 minute 10 second",
+ "2024-01-15T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2024-01-11T03:05:10.000Z",
+ )
+ .await;
+ check_fn(
+ "10 day 2 hour 5 minute 10 second",
+ "2024-01-30T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2024-01-21T05:10:20.000Z",
+ )
+ .await;
+ check_fn(
+ "10 day 2 hour 5 minute 10 second",
+ "2023-12-30T01:00:00Z",
+ "2024-01-01T01:00:00Z",
+ "2023-12-21T22:54:50.000Z",
+ )
+ .await;
+
+ // Nulls
+ let r = service
+ .exec_query(
+ "SELECT DATE_BIN(INTERVAL '1 month', CAST(NULL as timestamp), CAST('2023-12-30T01:00:00Z' AS timestamp))",
+ )
+ .await
+ .unwrap();
+ assert_eq!(to_rows(&r), rows(&[(NULL)]));
+
+ // Invalid number of args
+ service.exec_query("SELECT DATE_BIN(1)").await.unwrap_err();
+ service
+ .exec_query("SELECT DATE_BIN(1, 2)")
+ .await
+ .unwrap_err();
+ service
+ .exec_query("SELECT DATE_BIN(1, 2, 3, 4, 5)")
+ .await
+ .unwrap_err();
+
+ // Invalid types
+ service
+ .exec_query("SELECT DATE_BIN(NULL, CAST('2023-12-30T01:00:00Z' AS timestamp), CAST('2023-12-30T01:00:00Z' AS timestamp))")
+ .await
+ .unwrap_err();
+ service
+ .exec_query(
+ "SELECT DATE_BIN(INTERVAL '1 month', 1, CAST('2023-12-30T01:00:00Z' AS timestamp))",
+ )
+ .await
+ .unwrap_err();
+ service
+ .exec_query(
+ "SELECT DATE_BIN(INTERVAL '1 month', CAST('2023-12-30T01:00:00Z' AS timestamp), true)",
+ )
+ .await
+ .unwrap_err();
+
+ // Columnar data
+ service.exec_query("CREATE SCHEMA s").await.unwrap();
+ service
+ .exec_query("CREATE TABLE s.data(t timestamp)")
+ .await
+ .unwrap();
+ service
+ .exec_query(
+ "INSERT INTO s.data(t) VALUES ('2024-01-21T01:00:00Z'), ('2023-11-21T01:00:00Z'), ('2024-02-21T01:00:00Z'), (NULL)",
+ )
+ .await
+ .unwrap();
+ let r = service
+ .exec_query("SELECT DATE_BIN(INTERVAL '1 month', t, CAST('2024-01-01T01:00:00Z' AS timestamp)) FROM s.data ORDER BY 1")
+ .await
+ .unwrap();
+ assert_eq!(
+ to_rows(&r),
+ rows(&[
+ Some(timestamp_from_string("2023-11-01T01:00:00Z").unwrap()),
+ Some(timestamp_from_string("2024-01-01T01:00:00Z").unwrap()),
+ Some(timestamp_from_string("2024-02-01T01:00:00Z").unwrap()),
+ None,
+ ]),
+ );
+}
+
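Month-based intervals cannot reuse the fixed-length arithmetic sketched earlier because calendar months vary in length; instead, whole elapsed months are counted and floor-divided, which is what the `2 month` cases above exercise. A sketch of that path (again illustrative TypeScript, ignoring end-of-month day clamping, which the test origins on day 20 never hit):

```typescript
// Sketch of DATE_BIN for month intervals (not the cubestore code): count the
// whole calendar months from origin to source, floor-divide by the interval
// length in months, then add that many intervals back onto the origin.
function dateBinMonths(intervalMonths: number, source: Date, origin: Date): Date {
  let months =
    (source.getUTCFullYear() - origin.getUTCFullYear()) * 12 +
    (source.getUTCMonth() - origin.getUTCMonth());
  // Step back one month if the final partial month has not fully elapsed.
  if (source.getTime() < addUTCMonths(origin, months).getTime()) {
    months -= 1;
  }
  const index = Math.floor(months / intervalMonths);
  return addUTCMonths(origin, index * intervalMonths);
}

function addUTCMonths(d: Date, n: number): Date {
  const r = new Date(d.getTime());
  r.setUTCMonth(r.getUTCMonth() + n);
  return r;
}

// Mirrors a "2 month" test above: 2023-11-15 is three whole months before the
// 2024-01-20 origin, so it falls in the bin starting 2023-09-20.
console.log(
  dateBinMonths(
    2,
    new Date("2023-11-15T01:00:00Z"),
    new Date("2024-01-20T01:00:00Z"),
  ).toISOString(), // 2023-09-20T01:00:00.000Z
);
```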
async fn unsorted_merge_assertion(service: Box<dyn SqlClient>) {
service.exec_query("CREATE SCHEMA s").await.unwrap();
service
@@ -6014,7 +6189,7 @@ async fn unique_key_and_multi_partitions(service: Box<dyn SqlClient>) {
.exec_query(
"SELECT a, b FROM (
SELECT * FROM test.unique_parts1
- UNION ALL
+ UNION ALL
SELECT * FROM test.unique_parts2
) `tt` GROUP BY 1, 2 ORDER BY 1, 2 LIMIT 100",
)
@@ -6074,7 +6249,7 @@ async fn unique_key_and_multi_partitions_hash_aggregate(service: Box<dyn SqlClient>) {
service
.exec_query("CREATE TABLE s.Orders(a int, b int, c int, a_sum int, a_max int, a_min int, a_merge HYPERLOGLOG)
AGGREGATIONS(sum(a_sum), max(a_max), min(a_min), merge(a_merge))
- INDEX reg_index (a, b)
+ INDEX reg_index (a, b)
AGGREGATE INDEX aggr_index (a, b)
")
.await
@@ -6239,7 +6414,7 @@ async fn aggregate_index(service: Box<dyn SqlClient>) {
.exec_query(
"CREATE TABLE s.Orders(a int, b int, c int, a_sum int, a_max int, a_min int)
AGGREGATIONS(sum(a_sum), max(a_max), min(a_min))
- INDEX reg_index (a, b)
+ INDEX reg_index (a, b)
AGGREGATE INDEX aggr_index (a, b)
",
)
@@ -6335,7 +6510,7 @@ async fn aggregate_index_with_hll_bytes(service: Box<dyn SqlClient>) {
.exec_query(
"CREATE TABLE s.Orders(a int, b int, hll bytes)
AGGREGATIONS(merge(hll))
- AGGREGATE INDEX agg_index (a, b)
+ AGGREGATE INDEX agg_index (a, b)
",
)
.await
@@ -7098,9 +7273,9 @@ async fn limit_pushdown_group(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT id, SUM(n) FROM (
- SELECT * FROM foo.pushdown1
+ SELECT * FROM foo.pushdown1
union all
- SELECT * FROM foo.pushdown2
+ SELECT * FROM foo.pushdown2
) as `tb` GROUP BY 1 LIMIT 3",
None,
false,
@@ -7162,9 +7337,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a `aa`, b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2 ORDER BY 1 LIMIT 3",
Some("ind1"),
true,
@@ -7198,9 +7373,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2 ORDER BY 1, 2 LIMIT 3",
Some("ind1"),
true,
@@ -7235,9 +7410,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2 ORDER BY 2 LIMIT 3",
Some("ind1"),
false,
@@ -7270,9 +7445,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2 ORDER BY 1, 2 DESC LIMIT 3",
Some("ind1"),
false,
@@ -7305,9 +7480,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2 ORDER BY 1 DESC, 2 DESC LIMIT 3",
Some("ind1"),
true,
@@ -7340,9 +7515,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2 ORDER BY 1 DESC LIMIT 3",
Some("ind1"),
true,
@@ -7376,9 +7551,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, n FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2,3 ORDER BY 1 DESC, 2 DESC, 3 DESC LIMIT 3",
Some("default"),
true,
@@ -7412,9 +7587,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT b, SUM(n) FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1 ORDER BY 1 LIMIT 3",
Some("ind2"),
true,
@@ -7436,9 +7611,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, n FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 1, 2, 3 ORDER BY 1, 2 LIMIT 3",
Some("default"),
true,
@@ -7470,9 +7645,9 @@ async fn limit_pushdown_group_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, n FROM (
- SELECT * FROM foo.pushdown_group1
+ SELECT * FROM foo.pushdown_group1
union all
- SELECT * FROM foo.pushdown_group2
+ SELECT * FROM foo.pushdown_group2
) as `tb` GROUP BY 3, 1, 2 ORDER BY 1, 2 LIMIT 3",
Some("default"),
true,
@@ -7546,10 +7721,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(c) FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 12
GROUP BY 1, 2 ORDER BY 2 LIMIT 3",
Some("ind1"),
@@ -7583,10 +7758,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, b, SUM(c) FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 12
GROUP BY 1, 2 ORDER BY 2 DESC LIMIT 3",
Some("ind1"),
@@ -7620,10 +7795,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE b = 18
GROUP BY a, b, c ORDER BY a, c LIMIT 3",
Some("ind1"),
@@ -7645,10 +7820,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE b = 18
GROUP BY a, b, c ORDER BY a DESC, c LIMIT 3",
Some("ind1"),
@@ -7670,10 +7845,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT a, c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE b = 18
GROUP BY a, b, c ORDER BY a DESC, c DESC LIMIT 3",
Some("ind1"),
@@ -7696,10 +7871,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 11 and b = 18
GROUP BY a, b, c ORDER BY c LIMIT 3",
Some("ind1"),
@@ -7720,10 +7895,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 11 and b = 18
GROUP BY a, b, c ORDER BY c DESC LIMIT 3",
Some("ind1"),
@@ -7744,10 +7919,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a = 11 and b = 18
GROUP BY b, a, c ORDER BY c LIMIT 3",
Some("ind1"),
@@ -7769,10 +7944,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT c FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE a >= 11 and a < 12 and b = 18
GROUP BY a, b, c ORDER BY c LIMIT 3",
Some("ind1"),
@@ -7794,10 +7969,10 @@ async fn limit_pushdown_group_where_order(service: Box<dyn SqlClient>) {
let res = assert_limit_pushdown(
&service,
"SELECT b FROM (
- SELECT * FROM foo.pushdown_where_group1
+ SELECT * FROM foo.pushdown_where_group1
union all
- SELECT * FROM foo.pushdown_where_group2
- ) as `tb`
+ SELECT * FROM foo.pushdown_where_group2
+ ) as `tb`
WHERE c = 11
GROUP BY b, c ORDER BY b LIMIT 3",
Some("ind2"),
@@ -7855,10 +8030,10 @@ async fn limit_pushdown_without_group(service: Box<dyn SqlClient>) {